1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * drivers/usb/gadget/fsl_qe_udc.c
4  *
5  * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
6  *
7  * 	Xie Xiaobo <X.Xie@freescale.com>
8  * 	Li Yang <leoli@freescale.com>
9  * 	Based on bareboard code from Shlomi Gridish.
10  *
11  * Description:
12  * Freescale QE/CPM USB Peripheral Controller Driver
13  * The controller can be found on the MPC8360, MPC8272, etc.
14  * MPC8360 Rev 1.1 may need a QE microcode update
15  */
16 
17 #undef USB_TRACE
18 
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/ioport.h>
22 #include <linux/types.h>
23 #include <linux/errno.h>
24 #include <linux/err.h>
25 #include <linux/slab.h>
26 #include <linux/list.h>
27 #include <linux/interrupt.h>
28 #include <linux/io.h>
29 #include <linux/moduleparam.h>
30 #include <linux/of_address.h>
31 #include <linux/of_irq.h>
32 #include <linux/of_platform.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/usb/ch9.h>
35 #include <linux/usb/gadget.h>
36 #include <linux/usb/otg.h>
37 #include <soc/fsl/qe/qe.h>
38 #include <asm/cpm.h>
39 #include <asm/dma.h>
40 #include <asm/reg.h>
41 #include "fsl_qe_udc.h"
42 
43 #define DRIVER_DESC     "Freescale QE/CPM USB Device Controller driver"
44 #define DRIVER_AUTHOR   "Xie XiaoBo"
45 #define DRIVER_VERSION  "1.0"
46 
47 #define DMA_ADDR_INVALID        (~(dma_addr_t)0)
48 
49 static const char driver_name[] = "fsl_qe_udc";
50 static const char driver_desc[] = DRIVER_DESC;
51 
52 /* ep names are important to the gadget core; they must follow the convention expected by ep_match() */
53 static const char *const ep_name[] = {
54 	"ep0-control", /* everyone has ep0 */
55 	/* 3 configurable endpoints */
56 	"ep1",
57 	"ep2",
58 	"ep3",
59 };
60 
61 static const struct usb_endpoint_descriptor qe_ep0_desc = {
62 	.bLength =		USB_DT_ENDPOINT_SIZE,
63 	.bDescriptorType =	USB_DT_ENDPOINT,
64 
65 	.bEndpointAddress =	0,
66 	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
67 	.wMaxPacketSize =	USB_MAX_CTRL_PAYLOAD,
68 };
69 
70 /********************************************************************
71  *      Internally Used Functions Start
72 ********************************************************************/
73 /*-----------------------------------------------------------------
74  * done() - retire a request; the caller has blocked irqs
75  *--------------------------------------------------------------*/
76 static void done(struct qe_ep *ep, struct qe_req *req, int status)
77 {
78 	struct qe_udc *udc = ep->udc;
79 	unsigned char stopped = ep->stopped;
80 
81 	/* req->queue is the list node used by ep_queue(), which adds the
82 	 * request to the tail of ep->queue; here the request is removed
83 	 * from that list
84 	 */
85 	list_del_init(&req->queue);
86 
87 	/* req.status should be set as -EINPROGRESS in ep_queue() */
88 	if (req->req.status == -EINPROGRESS)
89 		req->req.status = status;
90 	else
91 		status = req->req.status;
92 
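	/* unmap a buffer we mapped ourselves in ep_queue(); otherwise just
	 * sync the gadget driver's own mapping back for the CPU */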
93 	if (req->mapped) {
94 		dma_unmap_single(udc->gadget.dev.parent,
95 			req->req.dma, req->req.length,
96 			ep_is_in(ep)
97 				? DMA_TO_DEVICE
98 				: DMA_FROM_DEVICE);
99 		req->req.dma = DMA_ADDR_INVALID;
100 		req->mapped = 0;
101 	} else
102 		dma_sync_single_for_cpu(udc->gadget.dev.parent,
103 			req->req.dma, req->req.length,
104 			ep_is_in(ep)
105 				? DMA_TO_DEVICE
106 				: DMA_FROM_DEVICE);
107 
108 	if (status && (status != -ESHUTDOWN))
109 		dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
110 			ep->ep.name, &req->req, status,
111 			req->req.actual, req->req.length);
112 
113 	/* don't modify queue heads during completion callback */
114 	ep->stopped = 1;
115 	spin_unlock(&udc->lock);
116 
117 	usb_gadget_giveback_request(&ep->ep, &req->req);
118 
119 	spin_lock(&udc->lock);
120 
121 	ep->stopped = stopped;
122 }
123 
124 /*-----------------------------------------------------------------
125  * nuke(): delete all requests related to this ep
126  *--------------------------------------------------------------*/
127 static void nuke(struct qe_ep *ep, int status)
128 {
129 	/* Retire any requests still linked to this ep */
130 	while (!list_empty(&ep->queue)) {
131 		struct qe_req *req = NULL;
132 		req = list_entry(ep->queue.next, struct qe_req, queue);
133 
134 		done(ep, req, status);
135 	}
136 }
137 
138 /*---------------------------------------------------------------------------*
139  * USB and endpoint manipulation, covering parameters and registers         *
140  *---------------------------------------------------------------------------*/
141 /* @value: 1--set stall 0--clean stall */
142 static int qe_eprx_stall_change(struct qe_ep *ep, int value)
143 {
144 	u16 tem_usep;
145 	u8 epnum = ep->epnum;
146 	struct qe_udc *udc = ep->udc;
147 
148 	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
149 	tem_usep = tem_usep & ~USB_RHS_MASK;
150 	if (value == 1)
151 		tem_usep |= USB_RHS_STALL;
152 	else if (ep->dir == USB_DIR_IN)
153 		tem_usep |= USB_RHS_IGNORE_OUT;
154 
155 	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
156 	return 0;
157 }
158 
159 static int qe_eptx_stall_change(struct qe_ep *ep, int value)
160 {
161 	u16 tem_usep;
162 	u8 epnum = ep->epnum;
163 	struct qe_udc *udc = ep->udc;
164 
165 	tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
166 	tem_usep = tem_usep & ~USB_THS_MASK;
167 	if (value == 1)
168 		tem_usep |= USB_THS_STALL;
169 	else if (ep->dir == USB_DIR_OUT)
170 		tem_usep |= USB_THS_IGNORE_IN;
171 
172 	out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
173 
174 	return 0;
175 }
176 
177 static int qe_ep0_stall(struct qe_udc *udc)
178 {
179 	qe_eptx_stall_change(&udc->eps[0], 1);
180 	qe_eprx_stall_change(&udc->eps[0], 1);
181 	udc->ep0_state = WAIT_FOR_SETUP;
182 	udc->ep0_dir = 0;
183 	return 0;
184 }
185 
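/*
 * Put the endpoint's RX side into the NACK handshake and mask its RXB/BSY
 * interrupts; qe_eprx_normal() re-enables reception later.
 */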
186 static int qe_eprx_nack(struct qe_ep *ep)
187 {
188 	u8 epnum = ep->epnum;
189 	struct qe_udc *udc = ep->udc;
190 
191 	if (ep->state == EP_STATE_IDLE) {
192 		/* Set the ep's nack */
193 		clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
194 				USB_RHS_MASK, USB_RHS_NACK);
195 
196 		/* Mask Rx and Busy interrupts */
197 		clrbits16(&udc->usb_regs->usb_usbmr,
198 				(USB_E_RXB_MASK | USB_E_BSY_MASK));
199 
200 		ep->state = EP_STATE_NACK;
201 	}
202 	return 0;
203 }
204 
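/*
 * Leave NACK mode: acknowledge any pending RXB/BSY events and unmask
 * their interrupts so reception can continue.
 */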
205 static int qe_eprx_normal(struct qe_ep *ep)
206 {
207 	struct qe_udc *udc = ep->udc;
208 
209 	if (ep->state == EP_STATE_NACK) {
210 		clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
211 				USB_RTHS_MASK, USB_THS_IGNORE_IN);
212 
213 		/* Unmask RX interrupts */
214 		out_be16(&udc->usb_regs->usb_usber,
215 				USB_E_BSY_MASK | USB_E_RXB_MASK);
216 		setbits16(&udc->usb_regs->usb_usbmr,
217 				(USB_E_RXB_MASK | USB_E_BSY_MASK));
218 
219 		ep->state = EP_STATE_IDLE;
220 		ep->has_data = 0;
221 	}
222 
223 	return 0;
224 }
225 
226 static int qe_ep_cmd_stoptx(struct qe_ep *ep)
227 {
228 	if (ep->udc->soc_type == PORT_CPM)
229 		cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
230 				CPM_USB_STOP_TX_OPCODE);
231 	else
232 		qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
233 				ep->epnum, 0);
234 
235 	return 0;
236 }
237 
238 static int qe_ep_cmd_restarttx(struct qe_ep *ep)
239 {
240 	if (ep->udc->soc_type == PORT_CPM)
241 		cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
242 				CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
243 	else
244 		qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
245 				ep->epnum, 0);
246 
247 	return 0;
248 }
249 
250 static int qe_ep_flushtxfifo(struct qe_ep *ep)
251 {
252 	struct qe_udc *udc = ep->udc;
253 	int i;
254 
255 	i = (int)ep->epnum;
256 
257 	qe_ep_cmd_stoptx(ep);
258 	out_8(&udc->usb_regs->usb_uscom,
259 		USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
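	/* rewind the controller's TX BD pointer to the ring base and clear
	 * the endpoint's TX state and byte count */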
260 	out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
261 	out_be32(&udc->ep_param[i]->tstate, 0);
262 	out_be16(&udc->ep_param[i]->tbcnt, 0);
263 
264 	ep->c_txbd = ep->txbase;
265 	ep->n_txbd = ep->txbase;
266 	qe_ep_cmd_restarttx(ep);
267 	return 0;
268 }
269 
270 static int qe_ep_filltxfifo(struct qe_ep *ep)
271 {
272 	struct qe_udc *udc = ep->udc;
273 
274 	out_8(&udc->usb_regs->usb_uscom,
275 			USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
276 	return 0;
277 }
278 
279 static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
280 {
281 	struct qe_ep *ep;
282 	u32 bdring_len;
283 	struct qe_bd __iomem *bd;
284 	int i;
285 
286 	ep = &udc->eps[pipe_num];
287 
288 	if (ep->dir == USB_DIR_OUT)
289 		bdring_len = USB_BDRING_LEN_RX;
290 	else
291 		bdring_len = USB_BDRING_LEN;
292 
293 	bd = ep->rxbase;
294 	for (i = 0; i < (bdring_len - 1); i++) {
295 		out_be32((u32 __iomem *)bd, R_E | R_I);
296 		bd++;
297 	}
298 	out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
299 
300 	bd = ep->txbase;
301 	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
302 		out_be32(&bd->buf, 0);
303 		out_be32((u32 __iomem *)bd, 0);
304 		bd++;
305 	}
306 	out_be32((u32 __iomem *)bd, T_W);
307 
308 	return 0;
309 }
310 
311 static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
312 {
313 	struct qe_ep *ep;
314 	u16 tmpusep;
315 
316 	ep = &udc->eps[pipe_num];
317 	tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
318 	tmpusep &= ~USB_RTHS_MASK;
319 
320 	switch (ep->dir) {
321 	case USB_DIR_BOTH:
322 		qe_ep_flushtxfifo(ep);
323 		break;
324 	case USB_DIR_OUT:
325 		tmpusep |= USB_THS_IGNORE_IN;
326 		break;
327 	case USB_DIR_IN:
328 		qe_ep_flushtxfifo(ep);
329 		tmpusep |= USB_RHS_IGNORE_OUT;
330 		break;
331 	default:
332 		break;
333 	}
334 	out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
335 
336 	qe_epbds_reset(udc, pipe_num);
337 
338 	return 0;
339 }
340 
341 static int qe_ep_toggledata01(struct qe_ep *ep)
342 {
343 	ep->data01 ^= 0x1;
344 	return 0;
345 }
346 
347 static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
348 {
349 	struct qe_ep *ep = &udc->eps[pipe_num];
350 	unsigned long tmp_addr = 0;
351 	struct usb_ep_para __iomem *epparam;
352 	int i;
353 	struct qe_bd __iomem *bd;
354 	int bdring_len;
355 
356 	if (ep->dir == USB_DIR_OUT)
357 		bdring_len = USB_BDRING_LEN_RX;
358 	else
359 		bdring_len = USB_BDRING_LEN;
360 
361 	epparam = udc->ep_param[pipe_num];
362 	/* allocate MURAM for the BD rings and set the ep parameters */
363 	tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
364 				USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
365 	if (IS_ERR_VALUE(tmp_addr))
366 		return -ENOMEM;
367 
368 	out_be16(&epparam->rbase, (u16)tmp_addr);
369 	out_be16(&epparam->tbase, (u16)(tmp_addr +
370 				(sizeof(struct qe_bd) * bdring_len)));
371 
372 	out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
373 	out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
374 
375 	ep->rxbase = cpm_muram_addr(tmp_addr);
376 	ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
377 				* bdring_len));
378 	ep->n_rxbd = ep->rxbase;
379 	ep->e_rxbd = ep->rxbase;
380 	ep->n_txbd = ep->txbase;
381 	ep->c_txbd = ep->txbase;
382 	ep->data01 = 0; /* data0 */
383 
384 	/* Init TX and RX bds */
385 	bd = ep->rxbase;
386 	for (i = 0; i < bdring_len - 1; i++) {
387 		out_be32(&bd->buf, 0);
388 		out_be32((u32 __iomem *)bd, 0);
389 		bd++;
390 	}
391 	out_be32(&bd->buf, 0);
392 	out_be32((u32 __iomem *)bd, R_W);
393 
394 	bd = ep->txbase;
395 	for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
396 		out_be32(&bd->buf, 0);
397 		out_be32((u32 __iomem *)bd, 0);
398 		bd++;
399 	}
400 	out_be32(&bd->buf, 0);
401 	out_be32((u32 __iomem *)bd, T_W);
402 
403 	return 0;
404 }
405 
406 static int qe_ep_rxbd_update(struct qe_ep *ep)
407 {
408 	unsigned int size;
409 	int i;
410 	unsigned int tmp;
411 	struct qe_bd __iomem *bd;
412 	unsigned int bdring_len;
413 
414 	if (ep->rxbase == NULL)
415 		return -EINVAL;
416 
417 	bd = ep->rxbase;
418 
419 	ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
420 	if (!ep->rxframe)
421 		return -ENOMEM;
422 
423 	qe_frame_init(ep->rxframe);
424 
425 	if (ep->dir == USB_DIR_OUT)
426 		bdring_len = USB_BDRING_LEN_RX;
427 	else
428 		bdring_len = USB_BDRING_LEN;
429 
430 	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
431 	ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
432 	if (!ep->rxbuffer) {
433 		kfree(ep->rxframe);
434 		return -ENOMEM;
435 	}
436 
437 	ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
438 	if (ep->rxbuf_d == DMA_ADDR_INVALID) {
439 		ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent,
440 					ep->rxbuffer,
441 					size,
442 					DMA_FROM_DEVICE);
443 		ep->rxbufmap = 1;
444 	} else {
445 		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
446 					ep->rxbuf_d, size,
447 					DMA_FROM_DEVICE);
448 		ep->rxbufmap = 0;
449 	}
450 
451 	size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
452 	tmp = ep->rxbuf_d;
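	/* align the start of the first BD buffer: round the DMA address down
	 * to a 32-bit boundary, then step one word forward */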
453 	tmp = (u32)(((tmp >> 2) << 2) + 4);
454 
455 	for (i = 0; i < bdring_len - 1; i++) {
456 		out_be32(&bd->buf, tmp);
457 		out_be32((u32 __iomem *)bd, (R_E | R_I));
458 		tmp = tmp + size;
459 		bd++;
460 	}
461 	out_be32(&bd->buf, tmp);
462 	out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
463 
464 	return 0;
465 }
466 
467 static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
468 {
469 	struct qe_ep *ep = &udc->eps[pipe_num];
470 	struct usb_ep_para __iomem *epparam;
471 	u16 usep, logepnum;
472 	u16 tmp;
473 	u8 rtfcr = 0;
474 
475 	epparam = udc->ep_param[pipe_num];
476 
477 	usep = 0;
478 	logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
479 	usep |= (logepnum << USB_EPNUM_SHIFT);
480 
481 	switch (ep->ep.desc->bmAttributes & 0x03) {
482 	case USB_ENDPOINT_XFER_BULK:
483 		usep |= USB_TRANS_BULK;
484 		break;
485 	case USB_ENDPOINT_XFER_ISOC:
486 		usep |=  USB_TRANS_ISO;
487 		break;
488 	case USB_ENDPOINT_XFER_INT:
489 		usep |= USB_TRANS_INT;
490 		break;
491 	default:
492 		usep |= USB_TRANS_CTR;
493 		break;
494 	}
495 
496 	switch (ep->dir) {
497 	case USB_DIR_OUT:
498 		usep |= USB_THS_IGNORE_IN;
499 		break;
500 	case USB_DIR_IN:
501 		usep |= USB_RHS_IGNORE_OUT;
502 		break;
503 	default:
504 		break;
505 	}
506 	out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
507 
508 	rtfcr = 0x30;
509 	out_8(&epparam->rbmr, rtfcr);
510 	out_8(&epparam->tbmr, rtfcr);
511 
512 	tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
513 	/* MRBLR must be divisible by 4 */
514 	tmp = (u16)(((tmp >> 2) << 2) + 4);
515 	out_be16(&epparam->mrblr, tmp);
516 
517 	return 0;
518 }
519 
520 static int qe_ep_init(struct qe_udc *udc,
521 		      unsigned char pipe_num,
522 		      const struct usb_endpoint_descriptor *desc)
523 {
524 	struct qe_ep *ep = &udc->eps[pipe_num];
525 	unsigned long flags;
526 	int reval = 0;
527 	u16 max = 0;
528 
529 	max = usb_endpoint_maxp(desc);
530 
531 	/* validate the max packet size for this endpoint;
532 	 * refer to USB 2.0 spec, table 9-13
533 	 */
534 	if (pipe_num != 0) {
535 		switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
536 		case USB_ENDPOINT_XFER_BULK:
537 			if (strstr(ep->ep.name, "-iso")
538 					|| strstr(ep->ep.name, "-int"))
539 				goto en_done;
540 			switch (udc->gadget.speed) {
541 			case USB_SPEED_HIGH:
542 			if ((max == 128) || (max == 256) || (max == 512))
543 				break;
544 			default:
545 				switch (max) {
546 				case 4:
547 				case 8:
548 				case 16:
549 				case 32:
550 				case 64:
551 					break;
552 				default:
553 				case USB_SPEED_LOW:
554 					goto en_done;
555 				}
556 			}
557 			break;
558 		case USB_ENDPOINT_XFER_INT:
559 			if (strstr(ep->ep.name, "-iso"))	/* bulk is ok */
560 				goto en_done;
561 			switch (udc->gadget.speed) {
562 			case USB_SPEED_HIGH:
563 				if (max <= 1024)
564 					break;
565 			case USB_SPEED_FULL:
566 				if (max <= 64)
567 					break;
568 			default:
569 				if (max <= 8)
570 					break;
571 				goto en_done;
572 			}
573 			break;
574 		case USB_ENDPOINT_XFER_ISOC:
575 			if (strstr(ep->ep.name, "-bulk")
576 				|| strstr(ep->ep.name, "-int"))
577 				goto en_done;
578 			switch (udc->gadget.speed) {
579 			case USB_SPEED_HIGH:
580 				if (max <= 1024)
581 					break;
582 			case USB_SPEED_FULL:
583 				if (max <= 1023)
584 					break;
585 			default:
586 				goto en_done;
587 			}
588 			break;
589 		case USB_ENDPOINT_XFER_CONTROL:
590 			if (strstr(ep->ep.name, "-iso")
591 				|| strstr(ep->ep.name, "-int"))
592 				goto en_done;
593 			switch (udc->gadget.speed) {
594 			case USB_SPEED_HIGH:
595 			case USB_SPEED_FULL:
596 				switch (max) {
597 				case 1:
598 				case 2:
599 				case 4:
600 				case 8:
601 				case 16:
602 				case 32:
603 				case 64:
604 					break;
605 				default:
606 					goto en_done;
607 				}
608 			case USB_SPEED_LOW:
609 				switch (max) {
610 				case 1:
611 				case 2:
612 				case 4:
613 				case 8:
614 					break;
615 				default:
616 					goto en_done;
617 				}
618 			default:
619 				goto en_done;
620 			}
621 			break;
622 
623 		default:
624 			goto en_done;
625 		}
626 	} /* if ep0*/
627 
628 	spin_lock_irqsave(&udc->lock, flags);
629 
630 	/* initialize ep structure */
631 	ep->ep.maxpacket = max;
632 	ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
633 	ep->ep.desc = desc;
634 	ep->stopped = 0;
635 	ep->init = 1;
636 
637 	if (pipe_num == 0) {
638 		ep->dir = USB_DIR_BOTH;
639 		udc->ep0_dir = USB_DIR_OUT;
640 		udc->ep0_state = WAIT_FOR_SETUP;
641 	} else	{
642 		switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
643 		case USB_DIR_OUT:
644 			ep->dir = USB_DIR_OUT;
645 			break;
646 		case USB_DIR_IN:
647 			ep->dir = USB_DIR_IN;
648 		default:
649 			break;
650 		}
651 	}
652 
653 	/* hardware special operation */
654 	qe_ep_bd_init(udc, pipe_num);
655 	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
656 		reval = qe_ep_rxbd_update(ep);
657 		if (reval)
658 			goto en_done1;
659 	}
660 
661 	if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
662 		ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
663 		if (!ep->txframe)
664 			goto en_done2;
665 		qe_frame_init(ep->txframe);
666 	}
667 
668 	qe_ep_register_init(udc, pipe_num);
669 
670 	/* Now HW will be NAKing transfers to that EP,
671 	 * until a buffer is queued to it. */
672 	spin_unlock_irqrestore(&udc->lock, flags);
673 
674 	return 0;
675 en_done2:
676 	kfree(ep->rxbuffer);
677 	kfree(ep->rxframe);
678 en_done1:
679 	spin_unlock_irqrestore(&udc->lock, flags);
680 en_done:
681 	dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
682 	return -ENODEV;
683 }
684 
685 static inline void qe_usb_enable(struct qe_udc *udc)
686 {
687 	setbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
688 }
689 
690 static inline void qe_usb_disable(struct qe_udc *udc)
691 {
692 	clrbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
693 }
694 
695 /*----------------------------------------------------------------------------*
696  *		Basic USB and EP manipulation functions end		      *
697  *----------------------------------------------------------------------------*/
698 
699 
700 /******************************************************************************
701 		UDC transmit and receive process
702  ******************************************************************************/
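/*
 * Hand a single RX BD back to the controller: mark it empty with the
 * interrupt bit set, keep the wrap bit, and advance the recycle pointer.
 */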
703 static void recycle_one_rxbd(struct qe_ep *ep)
704 {
705 	u32 bdstatus;
706 
707 	bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
708 	bdstatus = R_I | R_E | (bdstatus & R_W);
709 	out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
710 
711 	if (bdstatus & R_W)
712 		ep->e_rxbd = ep->rxbase;
713 	else
714 		ep->e_rxbd++;
715 }
716 
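/*
 * Re-arm consumed RX BDs (empty bit clear, zero length) starting at e_rxbd;
 * with stopatnext set, stop once the next-to-process BD is reached.
 */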
717 static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
718 {
719 	u32 bdstatus;
720 	struct qe_bd __iomem *bd, *nextbd;
721 	unsigned char stop = 0;
722 
723 	nextbd = ep->n_rxbd;
724 	bd = ep->e_rxbd;
725 	bdstatus = in_be32((u32 __iomem *)bd);
726 
727 	while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
728 		bdstatus = R_E | R_I | (bdstatus & R_W);
729 		out_be32((u32 __iomem *)bd, bdstatus);
730 
731 		if (bdstatus & R_W)
732 			bd = ep->rxbase;
733 		else
734 			bd++;
735 
736 		bdstatus = in_be32((u32 __iomem *)bd);
737 		if (stopatnext && (bd == nextbd))
738 			stop = 1;
739 	}
740 
741 	ep->e_rxbd = bd;
742 }
743 
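/*
 * Recycle consumed RX BDs.  If the BD at n_rxbd has itself been consumed,
 * resync e_rxbd from the controller's rbptr first; afterwards acknowledge a
 * pending BUSY event and leave NACK mode if requests are still queued.
 */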
744 static void ep_recycle_rxbds(struct qe_ep *ep)
745 {
746 	struct qe_bd __iomem *bd = ep->n_rxbd;
747 	u32 bdstatus;
748 	u8 epnum = ep->epnum;
749 	struct qe_udc *udc = ep->udc;
750 
751 	bdstatus = in_be32((u32 __iomem *)bd);
752 	if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
753 		bd = ep->rxbase +
754 				((in_be16(&udc->ep_param[epnum]->rbptr) -
755 				  in_be16(&udc->ep_param[epnum]->rbase))
756 				 >> 3);
757 		bdstatus = in_be32((u32 __iomem *)bd);
758 
759 		if (bdstatus & R_W)
760 			bd = ep->rxbase;
761 		else
762 			bd++;
763 
764 		ep->e_rxbd = bd;
765 		recycle_rxbds(ep, 0);
766 		ep->e_rxbd = ep->n_rxbd;
767 	} else
768 		recycle_rxbds(ep, 1);
769 
770 	if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
771 		out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
772 
773 	if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
774 		qe_eprx_normal(ep);
775 
776 	ep->localnack = 0;
777 }
778 
779 static void setup_received_handle(struct qe_udc *udc,
780 					struct usb_ctrlrequest *setup);
781 static int qe_ep_rxframe_handle(struct qe_ep *ep);
782 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
783 /* when the BD PID is SETUP, handle the packet */
784 static int ep0_setup_handle(struct qe_udc *udc)
785 {
786 	struct qe_ep *ep = &udc->eps[0];
787 	struct qe_frame *pframe;
788 	unsigned int fsize;
789 	u8 *cp;
790 
791 	pframe = ep->rxframe;
792 	if ((frame_get_info(pframe) & PID_SETUP)
793 			&& (udc->ep0_state == WAIT_FOR_SETUP)) {
794 		fsize = frame_get_length(pframe);
795 		if (unlikely(fsize != 8))
796 			return -EINVAL;
797 		cp = (u8 *)&udc->local_setup_buff;
798 		memcpy(cp, pframe->data, fsize);
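		/* the data stage of a control transfer always starts with DATA1 */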
799 		ep->data01 = 1;
800 
801 		/* handle the usb command base on the usb_ctrlrequest */
802 		setup_received_handle(udc, &udc->local_setup_buff);
803 		return 0;
804 	}
805 	return -EINVAL;
806 }
807 
808 static int qe_ep0_rx(struct qe_udc *udc)
809 {
810 	struct qe_ep *ep = &udc->eps[0];
811 	struct qe_frame *pframe;
812 	struct qe_bd __iomem *bd;
813 	u32 bdstatus, length;
814 	u32 vaddr;
815 
816 	pframe = ep->rxframe;
817 
818 	if (ep->dir == USB_DIR_IN) {
819 		dev_err(udc->dev, "ep0 not a control endpoint\n");
820 		return -EINVAL;
821 	}
822 
823 	bd = ep->n_rxbd;
824 	bdstatus = in_be32((u32 __iomem *)bd);
825 	length = bdstatus & BD_LENGTH_MASK;
826 
827 	while (!(bdstatus & R_E) && length) {
828 		if ((bdstatus & R_F) && (bdstatus & R_L)
829 			&& !(bdstatus & R_ERROR)) {
830 			if (length == USB_CRC_SIZE) {
831 				udc->ep0_state = WAIT_FOR_SETUP;
832 				dev_vdbg(udc->dev,
833 					"receive a ZLP in status phase\n");
834 			} else {
835 				qe_frame_clean(pframe);
836 				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
837 				frame_set_data(pframe, (u8 *)vaddr);
838 				frame_set_length(pframe,
839 						(length - USB_CRC_SIZE));
840 				frame_set_status(pframe, FRAME_OK);
841 				switch (bdstatus & R_PID) {
842 				case R_PID_SETUP:
843 					frame_set_info(pframe, PID_SETUP);
844 					break;
845 				case R_PID_DATA1:
846 					frame_set_info(pframe, PID_DATA1);
847 					break;
848 				default:
849 					frame_set_info(pframe, PID_DATA0);
850 					break;
851 				}
852 
853 				if ((bdstatus & R_PID) == R_PID_SETUP)
854 					ep0_setup_handle(udc);
855 				else
856 					qe_ep_rxframe_handle(ep);
857 			}
858 		} else {
859 			dev_err(udc->dev, "received a frame with error!\n");
860 		}
861 
862 		/* note: don't clear the rxbd's buffer address */
863 		recycle_one_rxbd(ep);
864 
865 		/* Get next BD */
866 		if (bdstatus & R_W)
867 			bd = ep->rxbase;
868 		else
869 			bd++;
870 
871 		bdstatus = in_be32((u32 __iomem *)bd);
872 		length = bdstatus & BD_LENGTH_MASK;
873 
874 	}
875 
876 	ep->n_rxbd = bd;
877 
878 	return 0;
879 }
880 
881 static int qe_ep_rxframe_handle(struct qe_ep *ep)
882 {
883 	struct qe_frame *pframe;
884 	u8 framepid = 0;
885 	unsigned int fsize;
886 	u8 *cp;
887 	struct qe_req *req;
888 
889 	pframe = ep->rxframe;
890 
891 	if (frame_get_info(pframe) & PID_DATA1)
892 		framepid = 0x1;
893 
894 	if (framepid != ep->data01) {
895 		dev_err(ep->udc->dev, "data01 toggle mismatch!\n");
896 		return -EIO;
897 	}
898 
899 	fsize = frame_get_length(pframe);
900 	if (list_empty(&ep->queue)) {
901 		dev_err(ep->udc->dev, "%s has no request queued!\n", ep->name);
902 	} else {
903 		req = list_entry(ep->queue.next, struct qe_req, queue);
904 
905 		cp = (u8 *)(req->req.buf) + req->req.actual;
906 		if (cp) {
907 			memcpy(cp, pframe->data, fsize);
908 			req->req.actual += fsize;
909 			if ((fsize < ep->ep.maxpacket) ||
910 					(req->req.actual >= req->req.length)) {
911 				if (ep->epnum == 0)
912 					ep0_req_complete(ep->udc, req);
913 				else
914 					done(ep, req, 0);
915 				if (list_empty(&ep->queue) && ep->epnum != 0)
916 					qe_eprx_nack(ep);
917 			}
918 		}
919 	}
920 
921 	qe_ep_toggledata01(ep);
922 
923 	return 0;
924 }
925 
926 static void ep_rx_tasklet(struct tasklet_struct *t)
927 {
928 	struct qe_udc *udc = from_tasklet(udc, t, rx_tasklet);
929 	struct qe_ep *ep;
930 	struct qe_frame *pframe;
931 	struct qe_bd __iomem *bd;
932 	unsigned long flags;
933 	u32 bdstatus, length;
934 	u32 vaddr, i;
935 
936 	spin_lock_irqsave(&udc->lock, flags);
937 
938 	for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
939 		ep = &udc->eps[i];
940 
941 		if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
942 			dev_dbg(udc->dev,
943 				"This is a transmit ep or its tasklet is disabled!\n");
944 			continue;
945 		}
946 
947 		pframe = ep->rxframe;
948 		bd = ep->n_rxbd;
949 		bdstatus = in_be32((u32 __iomem *)bd);
950 		length = bdstatus & BD_LENGTH_MASK;
951 
952 		while (!(bdstatus & R_E) && length) {
953 			if (list_empty(&ep->queue)) {
954 				qe_eprx_nack(ep);
955 				dev_dbg(udc->dev,
956 					"The rxep has no req %d\n",
957 					ep->has_data);
958 				break;
959 			}
960 
961 			if ((bdstatus & R_F) && (bdstatus & R_L)
962 				&& !(bdstatus & R_ERROR)) {
963 				qe_frame_clean(pframe);
964 				vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
965 				frame_set_data(pframe, (u8 *)vaddr);
966 				frame_set_length(pframe,
967 						(length - USB_CRC_SIZE));
968 				frame_set_status(pframe, FRAME_OK);
969 				switch (bdstatus & R_PID) {
970 				case R_PID_DATA1:
971 					frame_set_info(pframe, PID_DATA1);
972 					break;
973 				case R_PID_SETUP:
974 					frame_set_info(pframe, PID_SETUP);
975 					break;
976 				default:
977 					frame_set_info(pframe, PID_DATA0);
978 					break;
979 				}
980 				/* handle the rx frame */
981 				qe_ep_rxframe_handle(ep);
982 			} else {
983 				dev_err(udc->dev,
984 					"error in received frame\n");
985 			}
986 			/* note: don't clear the rxbd's buffer address */
987 			/* clear the length */
988 			out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
989 			ep->has_data--;
990 			if (!(ep->localnack))
991 				recycle_one_rxbd(ep);
992 
993 			/* Get next BD */
994 			if (bdstatus & R_W)
995 				bd = ep->rxbase;
996 			else
997 				bd++;
998 
999 			bdstatus = in_be32((u32 __iomem *)bd);
1000 			length = bdstatus & BD_LENGTH_MASK;
1001 		}
1002 
1003 		ep->n_rxbd = bd;
1004 
1005 		if (ep->localnack)
1006 			ep_recycle_rxbds(ep);
1007 
1008 		ep->enable_tasklet = 0;
1009 	} /* for i=1 */
1010 
1011 	spin_unlock_irqrestore(&udc->lock, flags);
1012 }
1013 
1014 static int qe_ep_rx(struct qe_ep *ep)
1015 {
1016 	struct qe_udc *udc;
1017 	struct qe_frame *pframe;
1018 	struct qe_bd __iomem *bd;
1019 	u16 swoffs, ucoffs, emptybds;
1020 
1021 	udc = ep->udc;
1022 	pframe = ep->rxframe;
1023 
1024 	if (ep->dir == USB_DIR_IN) {
1025 		dev_err(udc->dev, "transmit ep in rx function\n");
1026 		return -EINVAL;
1027 	}
1028 
1029 	bd = ep->n_rxbd;
1030 
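	/* count the BDs that are still empty by comparing the software ring
	 * offset with the controller's rbptr (each BD is 8 bytes, hence >> 3) */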
1031 	swoffs = (u16)(bd - ep->rxbase);
1032 	ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
1033 			in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
1034 	if (swoffs < ucoffs)
1035 		emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
1036 	else
1037 		emptybds = swoffs - ucoffs;
1038 
1039 	if (emptybds < MIN_EMPTY_BDS) {
1040 		qe_eprx_nack(ep);
1041 		ep->localnack = 1;
1042 		dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
1043 	}
1044 	ep->has_data = USB_BDRING_LEN_RX - emptybds;
1045 
1046 	if (list_empty(&ep->queue)) {
1047 		qe_eprx_nack(ep);
1048 		dev_vdbg(udc->dev, "The rxep has no req queued with %d BDs\n",
1049 				ep->has_data);
1050 		return 0;
1051 	}
1052 
1053 	tasklet_schedule(&udc->rx_tasklet);
1054 	ep->enable_tasklet = 1;
1055 
1056 	return 0;
1057 }
1058 
1059 /* send the data described by a frame, regardless of tx_req */
1060 static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
1061 {
1062 	struct qe_udc *udc = ep->udc;
1063 	struct qe_bd __iomem *bd;
1064 	u16 saveusbmr;
1065 	u32 bdstatus, pidmask;
1066 	u32 paddr;
1067 
1068 	if (ep->dir == USB_DIR_OUT) {
1069 		dev_err(udc->dev, "receive ep passed to tx function\n");
1070 		return -EINVAL;
1071 	}
1072 
1073 	/* Disable the Tx interrupt */
1074 	saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
1075 	out_be16(&udc->usb_regs->usb_usbmr,
1076 			saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
1077 
1078 	bd = ep->n_txbd;
1079 	bdstatus = in_be32((u32 __iomem *)bd);
1080 
1081 	if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
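		/* the BD is free; a zero-length frame is sent as two bytes from
		 * nullbuf with hardware CRC generation suppressed (NO_CRC) */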
1082 		if (frame_get_length(frame) == 0) {
1083 			frame_set_data(frame, udc->nullbuf);
1084 			frame_set_length(frame, 2);
1085 			frame->info |= (ZLP | NO_CRC);
1086 			dev_vdbg(udc->dev, "the frame size = 0\n");
1087 		}
1088 		paddr = virt_to_phys((void *)frame->data);
1089 		out_be32(&bd->buf, paddr);
1090 		bdstatus = (bdstatus&T_W);
1091 		if (!(frame_get_info(frame) & NO_CRC))
1092 			bdstatus |= T_R | T_I | T_L | T_TC
1093 					| frame_get_length(frame);
1094 		else
1095 			bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
1096 
1097 		/* if the packet is a ZLP in status phase */
1098 		if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
1099 			ep->data01 = 0x1;
1100 
1101 		if (ep->data01) {
1102 			pidmask = T_PID_DATA1;
1103 			frame->info |= PID_DATA1;
1104 		} else {
1105 			pidmask = T_PID_DATA0;
1106 			frame->info |= PID_DATA0;
1107 		}
1108 		bdstatus |= T_CNF;
1109 		bdstatus |= pidmask;
1110 		out_be32((u32 __iomem *)bd, bdstatus);
1111 		qe_ep_filltxfifo(ep);
1112 
1113 		/* enable the TX interrupt */
1114 		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1115 
1116 		qe_ep_toggledata01(ep);
1117 		if (bdstatus & T_W)
1118 			ep->n_txbd = ep->txbase;
1119 		else
1120 			ep->n_txbd++;
1121 
1122 		return 0;
1123 	} else {
1124 		out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1125 		dev_vdbg(udc->dev, "The tx bd is not ready!\n");
1126 		return -EBUSY;
1127 	}
1128 }
1129 
1130 /* called when a BD has been transmitted; handles the tx_req
1131  * for every endpoint except ep0 */
1132 static int txcomplete(struct qe_ep *ep, unsigned char restart)
1133 {
1134 	if (ep->tx_req != NULL) {
1135 		struct qe_req *req = ep->tx_req;
1136 		unsigned zlp = 0, last_len = 0;
1137 
1138 		last_len = min_t(unsigned, req->req.length - ep->sent,
1139 				ep->ep.maxpacket);
1140 
1141 		if (!restart) {
1142 			int asent = ep->last;
1143 			ep->sent += asent;
1144 			ep->last -= asent;
1145 		} else {
1146 			ep->last = 0;
1147 		}
1148 
1149 		/* a ZLP is needed when req->req.zero is set and the transfer
		 * length is a whole number of max packets */
1150 		if (req->req.zero) {
1151 			if (last_len == 0 ||
1152 				(req->req.length % ep->ep.maxpacket) != 0)
1153 				zlp = 0;
1154 			else
1155 				zlp = 1;
1156 		} else
1157 			zlp = 0;
1158 
1159 		/* the request has already been transmitted completely */
1160 		if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
1161 			done(ep, ep->tx_req, 0);
1162 			ep->tx_req = NULL;
1163 			ep->last = 0;
1164 			ep->sent = 0;
1165 		}
1166 	}
1167 
1168 	/* try to pick up a new tx_req for this endpoint */
1169 	if (ep->tx_req == NULL) {
1170 		if (!list_empty(&ep->queue)) {
1171 			ep->tx_req = list_entry(ep->queue.next,	struct qe_req,
1172 							queue);
1173 			ep->last = 0;
1174 			ep->sent = 0;
1175 		}
1176 	}
1177 
1178 	return 0;
1179 }
1180 
1181 /* given a frame and a tx_req, send some data */
1182 static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
1183 {
1184 	unsigned int size;
1185 	u8 *buf;
1186 
1187 	qe_frame_clean(frame);
1188 	size = min_t(u32, (ep->tx_req->req.length - ep->sent),
1189 				ep->ep.maxpacket);
1190 	buf = (u8 *)ep->tx_req->req.buf + ep->sent;
1191 	if (buf && size) {
1192 		ep->last = size;
1193 		ep->tx_req->req.actual += size;
1194 		frame_set_data(frame, buf);
1195 		frame_set_length(frame, size);
1196 		frame_set_status(frame, FRAME_OK);
1197 		frame_set_info(frame, 0);
1198 		return qe_ep_tx(ep, frame);
1199 	}
1200 	return -EIO;
1201 }
1202 
1203 /* given a frame struct, send a ZLP */
1204 static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
1205 {
1206 	struct qe_udc *udc = ep->udc;
1207 
1208 	if (frame == NULL)
1209 		return -ENODEV;
1210 
1211 	qe_frame_clean(frame);
1212 	frame_set_data(frame, (u8 *)udc->nullbuf);
1213 	frame_set_length(frame, 2);
1214 	frame_set_status(frame, FRAME_OK);
1215 	frame_set_info(frame, (ZLP | NO_CRC | infor));
1216 
1217 	return qe_ep_tx(ep, frame);
1218 }
1219 
1220 static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
1221 {
1222 	struct qe_req *req = ep->tx_req;
1223 	int reval;
1224 
1225 	if (req == NULL)
1226 		return -ENODEV;
1227 
1228 	if ((req->req.length - ep->sent) > 0)
1229 		reval = qe_usb_senddata(ep, frame);
1230 	else
1231 		reval = sendnulldata(ep, frame, 0);
1232 
1233 	return reval;
1234 }
1235 
1236 /* if direction is DIR_IN, the status transaction is Device->Host;
1237  * if direction is DIR_OUT, the status transaction is Host->Device.
1238  * In the status phase the udc creates a request and collects the status */
1239 static int ep0_prime_status(struct qe_udc *udc, int direction)
1240 {
1241 
1242 	struct qe_ep *ep = &udc->eps[0];
1243 
1244 	if (direction == USB_DIR_IN) {
1245 		udc->ep0_state = DATA_STATE_NEED_ZLP;
1246 		udc->ep0_dir = USB_DIR_IN;
1247 		sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1248 	} else {
1249 		udc->ep0_dir = USB_DIR_OUT;
1250 		udc->ep0_state = WAIT_FOR_OUT_STATUS;
1251 	}
1252 
1253 	return 0;
1254 }
1255 
1256 /* a request completed on ep0, whether a gadget request or a udc-internal request */
1257 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
1258 {
1259 	struct qe_ep *ep = &udc->eps[0];
1260 	/* the usb and ep state has already been set in ch9setaddress() */
1261 
1262 	switch (udc->ep0_state) {
1263 	case DATA_STATE_XMIT:
1264 		done(ep, req, 0);
1265 		/* receive status phase */
1266 		if (ep0_prime_status(udc, USB_DIR_OUT))
1267 			qe_ep0_stall(udc);
1268 		break;
1269 
1270 	case DATA_STATE_NEED_ZLP:
1271 		done(ep, req, 0);
1272 		udc->ep0_state = WAIT_FOR_SETUP;
1273 		break;
1274 
1275 	case DATA_STATE_RECV:
1276 		done(ep, req, 0);
1277 		/* send status phase */
1278 		if (ep0_prime_status(udc, USB_DIR_IN))
1279 			qe_ep0_stall(udc);
1280 		break;
1281 
1282 	case WAIT_FOR_OUT_STATUS:
1283 		done(ep, req, 0);
1284 		udc->ep0_state = WAIT_FOR_SETUP;
1285 		break;
1286 
1287 	case WAIT_FOR_SETUP:
1288 		dev_vdbg(udc->dev, "Unexpected interrupt\n");
1289 		break;
1290 
1291 	default:
1292 		qe_ep0_stall(udc);
1293 		break;
1294 	}
1295 }
1296 
1297 static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
1298 {
1299 	struct qe_req *tx_req = NULL;
1300 	struct qe_frame *frame = ep->txframe;
1301 
1302 	if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
1303 		if (!restart)
1304 			ep->udc->ep0_state = WAIT_FOR_SETUP;
1305 		else
1306 			sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1307 		return 0;
1308 	}
1309 
1310 	tx_req = ep->tx_req;
1311 	if (tx_req != NULL) {
1312 		if (!restart) {
1313 			int asent = ep->last;
1314 			ep->sent += asent;
1315 			ep->last -= asent;
1316 		} else {
1317 			ep->last = 0;
1318 		}
1319 
1320 		/* the request has already been transmitted completely */
1321 		if ((ep->tx_req->req.length - ep->sent) <= 0) {
1322 			ep->tx_req->req.actual = (unsigned int)ep->sent;
1323 			ep0_req_complete(ep->udc, ep->tx_req);
1324 			ep->tx_req = NULL;
1325 			ep->last = 0;
1326 			ep->sent = 0;
1327 		}
1328 	} else {
1329 		dev_vdbg(ep->udc->dev, "the ep0 controller has no req\n");
1330 	}
1331 
1332 	return 0;
1333 }
1334 
1335 static int ep0_txframe_handle(struct qe_ep *ep)
1336 {
1337 	/* on error, transmit again */
1338 	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1339 		qe_ep_flushtxfifo(ep);
1340 		dev_vdbg(ep->udc->dev, "The EP0 transmit data has an error!\n");
1341 		if (frame_get_info(ep->txframe) & PID_DATA0)
1342 			ep->data01 = 0;
1343 		else
1344 			ep->data01 = 1;
1345 
1346 		ep0_txcomplete(ep, 1);
1347 	} else
1348 		ep0_txcomplete(ep, 0);
1349 
1350 	frame_create_tx(ep, ep->txframe);
1351 	return 0;
1352 }
1353 
1354 static int qe_ep0_txconf(struct qe_ep *ep)
1355 {
1356 	struct qe_bd __iomem *bd;
1357 	struct qe_frame *pframe;
1358 	u32 bdstatus;
1359 
1360 	bd = ep->c_txbd;
1361 	bdstatus = in_be32((u32 __iomem *)bd);
1362 	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1363 		pframe = ep->txframe;
1364 
1365 		/* clear and recycle the BD */
1366 		out_be32((u32 __iomem *)bd, bdstatus & T_W);
1367 		out_be32(&bd->buf, 0);
1368 		if (bdstatus & T_W)
1369 			ep->c_txbd = ep->txbase;
1370 		else
1371 			ep->c_txbd++;
1372 
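		/* c_txbd has caught up with n_txbd: the whole frame is confirmed,
		 * so record any error and let ep0_txframe_handle() finish it */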
1373 		if (ep->c_txbd == ep->n_txbd) {
1374 			if (bdstatus & DEVICE_T_ERROR) {
1375 				frame_set_status(pframe, FRAME_ERROR);
1376 				if (bdstatus & T_TO)
1377 					pframe->status |= TX_ER_TIMEOUT;
1378 				if (bdstatus & T_UN)
1379 					pframe->status |= TX_ER_UNDERUN;
1380 			}
1381 			ep0_txframe_handle(ep);
1382 		}
1383 
1384 		bd = ep->c_txbd;
1385 		bdstatus = in_be32((u32 __iomem *)bd);
1386 	}
1387 
1388 	return 0;
1389 }
1390 
1391 static int ep_txframe_handle(struct qe_ep *ep)
1392 {
1393 	if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1394 		qe_ep_flushtxfifo(ep);
1395 		dev_vdbg(ep->udc->dev, "The transmit data has an error!\n");
1396 		if (frame_get_info(ep->txframe) & PID_DATA0)
1397 			ep->data01 = 0;
1398 		else
1399 			ep->data01 = 1;
1400 
1401 		txcomplete(ep, 1);
1402 	} else
1403 		txcomplete(ep, 0);
1404 
1405 	frame_create_tx(ep, ep->txframe); /* send the data */
1406 	return 0;
1407 }
1408 
1409 /* confirm the already transmitted BDs */
1410 static int qe_ep_txconf(struct qe_ep *ep)
1411 {
1412 	struct qe_bd __iomem *bd;
1413 	struct qe_frame *pframe = NULL;
1414 	u32 bdstatus;
1415 	unsigned char breakonrxinterrupt = 0;
1416 
1417 	bd = ep->c_txbd;
1418 	bdstatus = in_be32((u32 __iomem *)bd);
1419 	while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1420 		pframe = ep->txframe;
1421 		if (bdstatus & DEVICE_T_ERROR) {
1422 			frame_set_status(pframe, FRAME_ERROR);
1423 			if (bdstatus & T_TO)
1424 				pframe->status |= TX_ER_TIMEOUT;
1425 			if (bdstatus & T_UN)
1426 				pframe->status |= TX_ER_UNDERUN;
1427 		}
1428 
1429 		/* clear and recycle the BD */
1430 		out_be32((u32 __iomem *)bd, bdstatus & T_W);
1431 		out_be32(&bd->buf, 0);
1432 		if (bdstatus & T_W)
1433 			ep->c_txbd = ep->txbase;
1434 		else
1435 			ep->c_txbd++;
1436 
1437 		/* handle the tx frame */
1438 		ep_txframe_handle(ep);
1439 		bd = ep->c_txbd;
1440 		bdstatus = in_be32((u32 __iomem *)bd);
1441 	}
1442 	if (breakonrxinterrupt)
1443 		return -EIO;
1444 	else
1445 		return 0;
1446 }
1447 
1448 /* Add a request in queue, and try to transmit a packet */
1449 static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
1450 {
1451 	int reval = 0;
1452 
1453 	if (ep->tx_req == NULL) {
1454 		ep->sent = 0;
1455 		ep->last = 0;
1456 		txcomplete(ep, 0); /* can gain a new tx_req */
1457 		reval = frame_create_tx(ep, ep->txframe);
1458 	}
1459 	return reval;
1460 }
1461 
1462 /* copy data already received in the BD ring into the pending request */
1463 static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
1464 {
1465 	struct qe_udc *udc = ep->udc;
1466 	struct qe_frame *pframe = NULL;
1467 	struct qe_bd __iomem *bd;
1468 	u32 bdstatus, length;
1469 	u32 vaddr, fsize;
1470 	u8 *cp;
1471 	u8 finish_req = 0;
1472 	u8 framepid;
1473 
1474 	if (list_empty(&ep->queue)) {
1475 		dev_vdbg(udc->dev, "the req has already finished!\n");
1476 		return 0;
1477 	}
1478 	pframe = ep->rxframe;
1479 
1480 	bd = ep->n_rxbd;
1481 	bdstatus = in_be32((u32 __iomem *)bd);
1482 	length = bdstatus & BD_LENGTH_MASK;
1483 
1484 	while (!(bdstatus & R_E) && length) {
1485 		if (finish_req)
1486 			break;
1487 		if ((bdstatus & R_F) && (bdstatus & R_L)
1488 					&& !(bdstatus & R_ERROR)) {
1489 			qe_frame_clean(pframe);
1490 			vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
1491 			frame_set_data(pframe, (u8 *)vaddr);
1492 			frame_set_length(pframe, (length - USB_CRC_SIZE));
1493 			frame_set_status(pframe, FRAME_OK);
1494 			switch (bdstatus & R_PID) {
1495 			case R_PID_DATA1:
1496 				frame_set_info(pframe, PID_DATA1); break;
1497 			default:
1498 				frame_set_info(pframe, PID_DATA0); break;
1499 			}
1500 			/* handle the rx frame */
1501 
1502 			if (frame_get_info(pframe) & PID_DATA1)
1503 				framepid = 0x1;
1504 			else
1505 				framepid = 0;
1506 
1507 			if (framepid != ep->data01) {
1508 				dev_vdbg(udc->dev, "data01 toggle mismatch!\n");
1509 			} else {
1510 				fsize = frame_get_length(pframe);
1511 
1512 				cp = (u8 *)(req->req.buf) + req->req.actual;
1513 				if (cp) {
1514 					memcpy(cp, pframe->data, fsize);
1515 					req->req.actual += fsize;
1516 					if ((fsize < ep->ep.maxpacket)
1517 						|| (req->req.actual >=
1518 							req->req.length)) {
1519 						finish_req = 1;
1520 						done(ep, req, 0);
1521 						if (list_empty(&ep->queue))
1522 							qe_eprx_nack(ep);
1523 					}
1524 				}
1525 				qe_ep_toggledata01(ep);
1526 			}
1527 		} else {
1528 			dev_err(udc->dev, "received a frame with error!\n");
1529 		}
1530 
1531 		/* note: don't clear the rxbd's buffer address,
1532 		 * only clear the length */
1533 		out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
1534 		ep->has_data--;
1535 
1536 		/* Get next BD */
1537 		if (bdstatus & R_W)
1538 			bd = ep->rxbase;
1539 		else
1540 			bd++;
1541 
1542 		bdstatus = in_be32((u32 __iomem *)bd);
1543 		length = bdstatus & BD_LENGTH_MASK;
1544 	}
1545 
1546 	ep->n_rxbd = bd;
1547 	ep_recycle_rxbds(ep);
1548 
1549 	return 0;
1550 }
1551 
1552 /* only add the request to the queue */
1553 static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
1554 {
1555 	if (ep->state == EP_STATE_NACK) {
1556 		if (ep->has_data <= 0) {
1557 			/* Enable rx and unmask rx interrupt */
1558 			qe_eprx_normal(ep);
1559 		} else {
1560 			/* Copy the exist BD data */
1561 			ep_req_rx(ep, req);
1562 		}
1563 	}
1564 
1565 	return 0;
1566 }
1567 
1568 /********************************************************************
1569 	Internally Used Functions End
1570 ********************************************************************/
1571 
1572 /*-----------------------------------------------------------------------
1573 	Endpoint Management Functions For Gadget
1574  -----------------------------------------------------------------------*/
1575 static int qe_ep_enable(struct usb_ep *_ep,
1576 			 const struct usb_endpoint_descriptor *desc)
1577 {
1578 	struct qe_udc *udc;
1579 	struct qe_ep *ep;
1580 	int retval = 0;
1581 	unsigned char epnum;
1582 
1583 	ep = container_of(_ep, struct qe_ep, ep);
1584 
1585 	/* catch various bogus parameters */
1586 	if (!_ep || !desc || _ep->name == ep_name[0] ||
1587 			(desc->bDescriptorType != USB_DT_ENDPOINT))
1588 		return -EINVAL;
1589 
1590 	udc = ep->udc;
1591 	if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
1592 		return -ESHUTDOWN;
1593 
1594 	epnum = (u8)desc->bEndpointAddress & 0xF;
1595 
1596 	retval = qe_ep_init(udc, epnum, desc);
1597 	if (retval != 0) {
1598 		cpm_muram_free(cpm_muram_offset(ep->rxbase));
1599 		dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
1600 		return -EINVAL;
1601 	}
1602 	dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
1603 	return 0;
1604 }
1605 
1606 static int qe_ep_disable(struct usb_ep *_ep)
1607 {
1608 	struct qe_udc *udc;
1609 	struct qe_ep *ep;
1610 	unsigned long flags;
1611 	unsigned int size;
1612 
1613 	ep = container_of(_ep, struct qe_ep, ep);
1614 	udc = ep->udc;
1615 
1616 	if (!_ep || !ep->ep.desc) {
1617 		dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL);
1618 		return -EINVAL;
1619 	}
1620 
1621 	spin_lock_irqsave(&udc->lock, flags);
1622 	/* Nuke all pending requests (does flush) */
1623 	nuke(ep, -ESHUTDOWN);
1624 	ep->ep.desc = NULL;
1625 	ep->stopped = 1;
1626 	ep->tx_req = NULL;
1627 	qe_ep_reset(udc, ep->epnum);
1628 	spin_unlock_irqrestore(&udc->lock, flags);
1629 
1630 	cpm_muram_free(cpm_muram_offset(ep->rxbase));
1631 
1632 	if (ep->dir == USB_DIR_OUT)
1633 		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1634 				(USB_BDRING_LEN_RX + 1);
1635 	else
1636 		size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1637 				(USB_BDRING_LEN + 1);
1638 
1639 	if (ep->dir != USB_DIR_IN) {
1640 		kfree(ep->rxframe);
1641 		if (ep->rxbufmap) {
1642 			dma_unmap_single(udc->gadget.dev.parent,
1643 					ep->rxbuf_d, size,
1644 					DMA_FROM_DEVICE);
1645 			ep->rxbuf_d = DMA_ADDR_INVALID;
1646 		} else {
1647 			dma_sync_single_for_cpu(
1648 					udc->gadget.dev.parent,
1649 					ep->rxbuf_d, size,
1650 					DMA_FROM_DEVICE);
1651 		}
1652 		kfree(ep->rxbuffer);
1653 	}
1654 
1655 	if (ep->dir != USB_DIR_OUT)
1656 		kfree(ep->txframe);
1657 
1658 	dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
1659 	return 0;
1660 }
1661 
1662 static struct usb_request *qe_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
1663 {
1664 	struct qe_req *req;
1665 
1666 	req = kzalloc(sizeof(*req), gfp_flags);
1667 	if (!req)
1668 		return NULL;
1669 
1670 	req->req.dma = DMA_ADDR_INVALID;
1671 
1672 	INIT_LIST_HEAD(&req->queue);
1673 
1674 	return &req->req;
1675 }
1676 
1677 static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
1678 {
1679 	struct qe_req *req;
1680 
1681 	req = container_of(_req, struct qe_req, req);
1682 
1683 	if (_req)
1684 		kfree(req);
1685 }
1686 
1687 static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
1688 {
1689 	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1690 	struct qe_req *req = container_of(_req, struct qe_req, req);
1691 	struct qe_udc *udc;
1692 	int reval;
1693 
1694 	udc = ep->udc;
1695 	/* catch various bogus parameters */
1696 	if (!_req || !req->req.complete || !req->req.buf
1697 			|| !list_empty(&req->queue)) {
1698 		dev_dbg(udc->dev, "bad params\n");
1699 		return -EINVAL;
1700 	}
1701 	if (!_ep || (!ep->ep.desc && ep_index(ep))) {
1702 		dev_dbg(udc->dev, "bad ep\n");
1703 		return -EINVAL;
1704 	}
1705 
1706 	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
1707 		return -ESHUTDOWN;
1708 
1709 	req->ep = ep;
1710 
1711 	/* map virtual address to hardware */
1712 	if (req->req.dma == DMA_ADDR_INVALID) {
1713 		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1714 					req->req.buf,
1715 					req->req.length,
1716 					ep_is_in(ep)
1717 					? DMA_TO_DEVICE :
1718 					DMA_FROM_DEVICE);
1719 		req->mapped = 1;
1720 	} else {
1721 		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
1722 					req->req.dma, req->req.length,
1723 					ep_is_in(ep)
1724 					? DMA_TO_DEVICE :
1725 					DMA_FROM_DEVICE);
1726 		req->mapped = 0;
1727 	}
1728 
1729 	req->req.status = -EINPROGRESS;
1730 	req->req.actual = 0;
1731 
1732 	list_add_tail(&req->queue, &ep->queue);
1733 	dev_vdbg(udc->dev, "gadget has a request on %s, length %d\n",
1734 			ep->name, req->req.length);
1735 
1736 	/* push the request to device */
1737 	if (ep_is_in(ep))
1738 		reval = ep_req_send(ep, req);
1739 
1740 	/* EP0 */
1741 	if (ep_index(ep) == 0 && req->req.length > 0) {
1742 		if (ep_is_in(ep))
1743 			udc->ep0_state = DATA_STATE_XMIT;
1744 		else
1745 			udc->ep0_state = DATA_STATE_RECV;
1746 	}
1747 
1748 	if (ep->dir == USB_DIR_OUT)
1749 		reval = ep_req_receive(ep, req);
1750 
1751 	return 0;
1752 }
1753 
1754 /* queues (submits) an I/O request to an endpoint */
1755 static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1756 		       gfp_t gfp_flags)
1757 {
1758 	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1759 	struct qe_udc *udc = ep->udc;
1760 	unsigned long flags;
1761 	int ret;
1762 
1763 	spin_lock_irqsave(&udc->lock, flags);
1764 	ret = __qe_ep_queue(_ep, _req);
1765 	spin_unlock_irqrestore(&udc->lock, flags);
1766 	return ret;
1767 }
1768 
1769 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
1770 static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1771 {
1772 	struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1773 	struct qe_req *req;
1774 	unsigned long flags;
1775 
1776 	if (!_ep || !_req)
1777 		return -EINVAL;
1778 
1779 	spin_lock_irqsave(&ep->udc->lock, flags);
1780 
1781 	/* make sure it's actually queued on this endpoint */
1782 	list_for_each_entry(req, &ep->queue, queue) {
1783 		if (&req->req == _req)
1784 			break;
1785 	}
1786 
1787 	if (&req->req != _req) {
1788 		spin_unlock_irqrestore(&ep->udc->lock, flags);
1789 		return -EINVAL;
1790 	}
1791 
1792 	done(ep, req, -ECONNRESET);
1793 
1794 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1795 	return 0;
1796 }
1797 
1798 /*-----------------------------------------------------------------
1799  * modify the endpoint halt feature
1800  * @ep: the non-isochronous endpoint being stalled
1801  * @value: 1--set halt  0--clear halt
1802  * Returns zero, or a negative error code.
1803 *----------------------------------------------------------------*/
1804 static int qe_ep_set_halt(struct usb_ep *_ep, int value)
1805 {
1806 	struct qe_ep *ep;
1807 	unsigned long flags;
1808 	int status = -EOPNOTSUPP;
1809 	struct qe_udc *udc;
1810 
1811 	ep = container_of(_ep, struct qe_ep, ep);
1812 	if (!_ep || !ep->ep.desc) {
1813 		status = -EINVAL;
1814 		goto out;
1815 	}
1816 
1817 	udc = ep->udc;
1818 	/* An attempt to halt an IN ep will fail if any transfer requests
1819 	 * are still queued */
1820 	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
1821 		status = -EAGAIN;
1822 		goto out;
1823 	}
1824 
1825 	status = 0;
1826 	spin_lock_irqsave(&ep->udc->lock, flags);
1827 	qe_eptx_stall_change(ep, value);
1828 	qe_eprx_stall_change(ep, value);
1829 	spin_unlock_irqrestore(&ep->udc->lock, flags);
1830 
1831 	if (ep->epnum == 0) {
1832 		udc->ep0_state = WAIT_FOR_SETUP;
1833 		udc->ep0_dir = 0;
1834 	}
1835 
1836 	/* set data toggle to DATA0 on clear halt */
1837 	if (value == 0)
1838 		ep->data01 = 0;
1839 out:
1840 	dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
1841 			value ?  "set" : "clear", status);
1842 
1843 	return status;
1844 }
1845 
1846 static const struct usb_ep_ops qe_ep_ops = {
1847 	.enable = qe_ep_enable,
1848 	.disable = qe_ep_disable,
1849 
1850 	.alloc_request = qe_alloc_request,
1851 	.free_request = qe_free_request,
1852 
1853 	.queue = qe_ep_queue,
1854 	.dequeue = qe_ep_dequeue,
1855 
1856 	.set_halt = qe_ep_set_halt,
1857 };
1858 
1859 /*------------------------------------------------------------------------
1860 	Gadget Driver Layer Operations
1861  ------------------------------------------------------------------------*/
1862 
1863 /* Get the current frame number */
1864 static int qe_get_frame(struct usb_gadget *gadget)
1865 {
1866 	struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget);
1867 	u16 tmp;
1868 
1869 	tmp = in_be16(&udc->usb_param->frame_n);
1870 	if (tmp & 0x8000)
1871 		return tmp & 0x07ff;
1872 	return -EINVAL;
1873 }
1874 
1875 static int fsl_qe_start(struct usb_gadget *gadget,
1876 		struct usb_gadget_driver *driver);
1877 static int fsl_qe_stop(struct usb_gadget *gadget);
1878 
1879 /* defined in usb_gadget.h */
1880 static const struct usb_gadget_ops qe_gadget_ops = {
1881 	.get_frame = qe_get_frame,
1882 	.udc_start = fsl_qe_start,
1883 	.udc_stop = fsl_qe_stop,
1884 };
1885 
1886 /*-------------------------------------------------------------------------
1887 	USB ep0 Setup process in BUS Enumeration
1888  -------------------------------------------------------------------------*/
1889 static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
1890 {
1891 	struct qe_ep *ep = &udc->eps[pipe];
1892 
1893 	nuke(ep, -ECONNRESET);
1894 	ep->tx_req = NULL;
1895 	return 0;
1896 }
1897 
1898 static int reset_queues(struct qe_udc *udc)
1899 {
1900 	u8 pipe;
1901 
1902 	for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
1903 		udc_reset_ep_queue(udc, pipe);
1904 
1905 	/* report disconnect; the driver is already quiesced */
1906 	spin_unlock(&udc->lock);
1907 	usb_gadget_udc_reset(&udc->gadget, udc->driver);
1908 	spin_lock(&udc->lock);
1909 
1910 	return 0;
1911 }
1912 
1913 static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
1914 			u16 length)
1915 {
1916 	/* Save the new address to device struct */
1917 	udc->device_address = (u8) value;
1918 	/* Update usb state */
1919 	udc->usb_state = USB_STATE_ADDRESS;
1920 
1921 	/* Status phase, send a ZLP */
1922 	if (ep0_prime_status(udc, USB_DIR_IN))
1923 		qe_ep0_stall(udc);
1924 }
1925 
1926 static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
1927 {
1928 	struct qe_req *req = container_of(_req, struct qe_req, req);
1929 
1930 	req->req.buf = NULL;
1931 	kfree(req);
1932 }
1933 
1934 static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
1935 			u16 index, u16 length)
1936 {
1937 	u16 usb_status = 0;
1938 	struct qe_req *req;
1939 	struct qe_ep *ep;
1940 	int status = 0;
1941 
1942 	ep = &udc->eps[0];
1943 	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1944 		/* Get device status */
1945 		usb_status = 1 << USB_DEVICE_SELF_POWERED;
1946 	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
1947 		/* Get interface status */
1948 		/* We don't have interface information in udc driver */
1949 		usb_status = 0;
1950 	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
1951 		/* Get endpoint status */
1952 		int pipe = index & USB_ENDPOINT_NUMBER_MASK;
1953 		struct qe_ep *target_ep = &udc->eps[pipe];
1954 		u16 usep;
1955 
1956 		/* stall if endpoint doesn't exist */
1957 		if (!target_ep->ep.desc)
1958 			goto stall;
1959 
1960 		usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
1961 		if (index & USB_DIR_IN) {
1962 			if (target_ep->dir != USB_DIR_IN)
1963 				goto stall;
1964 			if ((usep & USB_THS_MASK) == USB_THS_STALL)
1965 				usb_status = 1 << USB_ENDPOINT_HALT;
1966 		} else {
1967 			if (target_ep->dir != USB_DIR_OUT)
1968 				goto stall;
1969 			if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
1970 				usb_status = 1 << USB_ENDPOINT_HALT;
1971 		}
1972 	}
1973 
1974 	req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL),
1975 					struct qe_req, req);
1976 	req->req.length = 2;
1977 	req->req.buf = udc->statusbuf;
1978 	*(u16 *)req->req.buf = cpu_to_le16(usb_status);
1979 	req->req.status = -EINPROGRESS;
1980 	req->req.actual = 0;
1981 	req->req.complete = ownercomplete;
1982 
1983 	udc->ep0_dir = USB_DIR_IN;
1984 
1985 	/* data phase */
1986 	status = __qe_ep_queue(&ep->ep, &req->req);
1987 
1988 	if (status == 0)
1989 		return;
1990 stall:
1991 	dev_err(udc->dev, "Can't respond to getstatus request\n");
1992 	qe_ep0_stall(udc);
1993 }
1994 
1995 /* only handle setup requests; assume the device is in a normal state */
1996 static void setup_received_handle(struct qe_udc *udc,
1997 				struct usb_ctrlrequest *setup)
1998 {
1999 	/* Fix endianness (udc->local_setup_buff is in CPU endianness now) */
2000 	u16 wValue = le16_to_cpu(setup->wValue);
2001 	u16 wIndex = le16_to_cpu(setup->wIndex);
2002 	u16 wLength = le16_to_cpu(setup->wLength);
2003 
2004 	/* clear the previous request in the ep0 */
2005 	udc_reset_ep_queue(udc, 0);
2006 
2007 	if (setup->bRequestType & USB_DIR_IN)
2008 		udc->ep0_dir = USB_DIR_IN;
2009 	else
2010 		udc->ep0_dir = USB_DIR_OUT;
2011 
2012 	switch (setup->bRequest) {
2013 	case USB_REQ_GET_STATUS:
2014 		/* Data+Status phase from udc */
2015 		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2016 					!= (USB_DIR_IN | USB_TYPE_STANDARD))
2017 			break;
2018 		ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
2019 					wLength);
2020 		return;
2021 
2022 	case USB_REQ_SET_ADDRESS:
2023 		/* Status phase from udc */
2024 		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
2025 						USB_RECIP_DEVICE))
2026 			break;
2027 		ch9setaddress(udc, wValue, wIndex, wLength);
2028 		return;
2029 
2030 	case USB_REQ_CLEAR_FEATURE:
2031 	case USB_REQ_SET_FEATURE:
2032 		/* Requests with no data phase, status phase from udc */
2033 		if ((setup->bRequestType & USB_TYPE_MASK)
2034 					!= USB_TYPE_STANDARD)
2035 			break;
2036 
2037 		if ((setup->bRequestType & USB_RECIP_MASK)
2038 				== USB_RECIP_ENDPOINT) {
2039 			int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
2040 			struct qe_ep *ep;
2041 
2042 			if (wValue != 0 || wLength != 0
2043 				|| pipe >= USB_MAX_ENDPOINTS)
2044 				break;
2045 			ep = &udc->eps[pipe];
2046 
2047 			spin_unlock(&udc->lock);
2048 			qe_ep_set_halt(&ep->ep,
2049 					(setup->bRequest == USB_REQ_SET_FEATURE)
2050 						? 1 : 0);
2051 			spin_lock(&udc->lock);
2052 		}
2053 
2054 		ep0_prime_status(udc, USB_DIR_IN);
2055 
2056 		return;
2057 
2058 	default:
2059 		break;
2060 	}
2061 
2062 	if (wLength) {
2063 		/* Data phase from gadget, status phase from udc */
2064 		if (setup->bRequestType & USB_DIR_IN) {
2065 			udc->ep0_state = DATA_STATE_XMIT;
2066 			udc->ep0_dir = USB_DIR_IN;
2067 		} else {
2068 			udc->ep0_state = DATA_STATE_RECV;
2069 			udc->ep0_dir = USB_DIR_OUT;
2070 		}
2071 		spin_unlock(&udc->lock);
2072 		if (udc->driver->setup(&udc->gadget,
2073 					&udc->local_setup_buff) < 0)
2074 			qe_ep0_stall(udc);
2075 		spin_lock(&udc->lock);
2076 	} else {
2077 		/* No data phase, IN status from gadget */
2078 		udc->ep0_dir = USB_DIR_IN;
2079 		spin_unlock(&udc->lock);
2080 		if (udc->driver->setup(&udc->gadget,
2081 					&udc->local_setup_buff) < 0)
2082 			qe_ep0_stall(udc);
2083 		spin_lock(&udc->lock);
2084 		udc->ep0_state = DATA_STATE_NEED_ZLP;
2085 	}
2086 }
2087 
2088 /*-------------------------------------------------------------------------
2089 	USB Interrupt handlers
2090  -------------------------------------------------------------------------*/
2091 static void suspend_irq(struct qe_udc *udc)
2092 {
2093 	udc->resume_state = udc->usb_state;
2094 	udc->usb_state = USB_STATE_SUSPENDED;
2095 
2096 	/* report suspend to the driver; serial.c does not support this */
2097 	if (udc->driver->suspend)
2098 		udc->driver->suspend(&udc->gadget);
2099 }
2100 
2101 static void resume_irq(struct qe_udc *udc)
2102 {
2103 	udc->usb_state = udc->resume_state;
2104 	udc->resume_state = 0;
2105 
2106 	/* report resume to the driver; serial.c does not support this */
2107 	if (udc->driver->resume)
2108 		udc->driver->resume(&udc->gadget);
2109 }
2110 
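/*
 * Suspend/resume are inferred from the idle interrupt: if the bus has
 * gone idle, report a suspend; if activity returns while suspended,
 * report a resume.
 */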
2111 static void idle_irq(struct qe_udc *udc)
2112 {
2113 	u8 usbs;
2114 
2115 	usbs = in_8(&udc->usb_regs->usb_usbs);
2116 	if (usbs & USB_IDLE_STATUS_MASK) {
2117 		if ((udc->usb_state) != USB_STATE_SUSPENDED)
2118 			suspend_irq(udc);
2119 	} else {
2120 		if (udc->usb_state == USB_STATE_SUSPENDED)
2121 			resume_irq(udc);
2122 	}
2123 }
2124 
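/*
 * Bus reset: quiesce the controller, clear the device address, reset
 * every initialized endpoint and its request queue, then re-enable the
 * controller in the default (unaddressed) state.
 */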
2125 static int reset_irq(struct qe_udc *udc)
2126 {
2127 	unsigned char i;
2128 
2129 	if (udc->usb_state == USB_STATE_DEFAULT)
2130 		return 0;
2131 
2132 	qe_usb_disable(udc);
2133 	out_8(&udc->usb_regs->usb_usadr, 0);
2134 
2135 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2136 		if (udc->eps[i].init)
2137 			qe_ep_reset(udc, i);
2138 	}
2139 
2140 	reset_queues(udc);
2141 	udc->usb_state = USB_STATE_DEFAULT;
2142 	udc->ep0_state = WAIT_FOR_SETUP;
2143 	udc->ep0_dir = USB_DIR_OUT;
2144 	qe_usb_enable(udc);
2145 	return 0;
2146 }
2147 
2148 static int bsy_irq(struct qe_udc *udc)
2149 {
2150 	return 0;
2151 }
2152 
2153 static int txe_irq(struct qe_udc *udc)
2154 {
2155 	return 0;
2156 }
2157 
2158 /* ep0 tx interrupts are also handled here */
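/*
 * A deferred SET_ADDRESS is committed to the USADR register here, and
 * any buffer descriptor the controller has finished transmitting is
 * confirmed, for ep0 and data endpoints alike.
 */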
2159 static int tx_irq(struct qe_udc *udc)
2160 {
2161 	struct qe_ep *ep;
2162 	struct qe_bd __iomem *bd;
2163 	int i, res = 0;
2164 
2165 	if ((udc->usb_state == USB_STATE_ADDRESS)
2166 		&& (in_8(&udc->usb_regs->usb_usadr) == 0))
2167 		out_8(&udc->usb_regs->usb_usadr, udc->device_address);
2168 
2169 	for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
2170 		ep = &udc->eps[i];
2171 		if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
2172 			bd = ep->c_txbd;
2173 			if (!(in_be32((u32 __iomem *)bd) & T_R)
2174 						&& (in_be32(&bd->buf))) {
2175 				/* confirm the transmitted bd */
2176 				if (ep->epnum == 0)
2177 					res = qe_ep0_txconf(ep);
2178 				else
2179 					res = qe_ep_txconf(ep);
2180 			}
2181 		}
2182 	}
2183 	return res;
2184 }
2185 
2186 
2187 /* setup packet rx is handled in this function too */
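/*
 * Walk the OUT endpoints and hand every buffer descriptor the
 * controller has filled to the ep0 or data-endpoint receive path.
 */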
2188 static void rx_irq(struct qe_udc *udc)
2189 {
2190 	struct qe_ep *ep;
2191 	struct qe_bd __iomem *bd;
2192 	int i;
2193 
2194 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2195 		ep = &udc->eps[i];
2196 		if (ep && ep->init && (ep->dir != USB_DIR_IN)) {
2197 			bd = ep->n_rxbd;
2198 			if (!(in_be32((u32 __iomem *)bd) & R_E)
2199 						&& (in_be32(&bd->buf))) {
2200 				if (ep->epnum == 0) {
2201 					qe_ep0_rx(udc);
2202 				} else {
2203 					/* non-setup packet receive */
2204 					qe_ep_rx(ep);
2205 				}
2206 			}
2207 		}
2208 	}
2209 }
2210 
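/*
 * Top-level interrupt handler: read the event register masked by the
 * interrupt mask register, acknowledge the pending events by writing
 * them back, and dispatch to the individual handlers under the udc lock.
 */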
2211 static irqreturn_t qe_udc_irq(int irq, void *_udc)
2212 {
2213 	struct qe_udc *udc = (struct qe_udc *)_udc;
2214 	u16 irq_src;
2215 	irqreturn_t status = IRQ_NONE;
2216 	unsigned long flags;
2217 
2218 	spin_lock_irqsave(&udc->lock, flags);
2219 
2220 	irq_src = in_be16(&udc->usb_regs->usb_usber) &
2221 		in_be16(&udc->usb_regs->usb_usbmr);
2222 	/* Clear notification bits */
2223 	out_be16(&udc->usb_regs->usb_usber, irq_src);
2224 	/* USB Interrupt */
2225 	if (irq_src & USB_E_IDLE_MASK) {
2226 		idle_irq(udc);
2227 		irq_src &= ~USB_E_IDLE_MASK;
2228 		status = IRQ_HANDLED;
2229 	}
2230 
2231 	if (irq_src & USB_E_TXB_MASK) {
2232 		tx_irq(udc);
2233 		irq_src &= ~USB_E_TXB_MASK;
2234 		status = IRQ_HANDLED;
2235 	}
2236 
2237 	if (irq_src & USB_E_RXB_MASK) {
2238 		rx_irq(udc);
2239 		irq_src &= ~USB_E_RXB_MASK;
2240 		status = IRQ_HANDLED;
2241 	}
2242 
2243 	if (irq_src & USB_E_RESET_MASK) {
2244 		reset_irq(udc);
2245 		irq_src &= ~USB_E_RESET_MASK;
2246 		status = IRQ_HANDLED;
2247 	}
2248 
2249 	if (irq_src & USB_E_BSY_MASK) {
2250 		bsy_irq(udc);
2251 		irq_src &= ~USB_E_BSY_MASK;
2252 		status = IRQ_HANDLED;
2253 	}
2254 
2255 	if (irq_src & USB_E_TXE_MASK) {
2256 		txe_irq(udc);
2257 		irq_src &= ~USB_E_TXE_MASK;
2258 		status = IRQ_HANDLED;
2259 	}
2260 
2261 	spin_unlock_irqrestore(&udc->lock, flags);
2262 
2263 	return status;
2264 }
2265 
2266 /*-------------------------------------------------------------------------
2267 	Gadget driver probe and unregister.
2268  --------------------------------------------------------------------------*/
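/*
 * Bind a gadget driver: hook it up, enable the controller, clear any
 * stale events and unmask the default device interrupts, then wait for
 * the host to start enumeration.
 */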
2269 static int fsl_qe_start(struct usb_gadget *gadget,
2270 		struct usb_gadget_driver *driver)
2271 {
2272 	struct qe_udc *udc;
2273 	unsigned long flags;
2274 
2275 	udc = container_of(gadget, struct qe_udc, gadget);
2276 	/* a lock is needed here, though it is not clear whether this lock or another should be used */
2277 	spin_lock_irqsave(&udc->lock, flags);
2278 
2279 	driver->driver.bus = NULL;
2280 	/* hook up the driver */
2281 	udc->driver = driver;
2282 	udc->gadget.speed = driver->max_speed;
2283 
2284 	/* Enable IRQ reg and Set usbcmd reg EN bit */
2285 	qe_usb_enable(udc);
2286 
2287 	out_be16(&udc->usb_regs->usb_usber, 0xffff);
2288 	out_be16(&udc->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);
2289 	udc->usb_state = USB_STATE_ATTACHED;
2290 	udc->ep0_state = WAIT_FOR_SETUP;
2291 	udc->ep0_dir = USB_DIR_OUT;
2292 	spin_unlock_irqrestore(&udc->lock, flags);
2293 
2294 	return 0;
2295 }
2296 
2297 static int fsl_qe_stop(struct usb_gadget *gadget)
2298 {
2299 	struct qe_udc *udc;
2300 	struct qe_ep *loop_ep;
2301 	unsigned long flags;
2302 
2303 	udc = container_of(gadget, struct qe_udc, gadget);
2304 	/* stop usb controller, disable intr */
2305 	qe_usb_disable(udc);
2306 
2307 	/* in fact, not needed */
2308 	udc->usb_state = USB_STATE_ATTACHED;
2309 	udc->ep0_state = WAIT_FOR_SETUP;
2310 	udc->ep0_dir = 0;
2311 
2312 	/* standard operation */
2313 	spin_lock_irqsave(&udc->lock, flags);
2314 	udc->gadget.speed = USB_SPEED_UNKNOWN;
2315 	nuke(&udc->eps[0], -ESHUTDOWN);
2316 	list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list)
2317 		nuke(loop_ep, -ESHUTDOWN);
2318 	spin_unlock_irqrestore(&udc->lock, flags);
2319 
2320 	udc->driver = NULL;
2321 
2322 	return 0;
2323 }
2324 
2325 /* udc structure alloc and setup, including ep-param allocation */
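/*
 * The USB parameter RAM lives in MURAM; its offset comes from the
 * second "reg" entry in the device tree.  A block of endpoint parameter
 * areas (apparently 32 bytes each) is then carved out of MURAM and the
 * per-endpoint pointers in the parameter RAM are set to point at it.
 */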
2326 static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
2327 {
2328 	struct qe_udc *udc;
2329 	struct device_node *np = ofdev->dev.of_node;
2330 	unsigned long tmp_addr = 0;
2331 	struct usb_device_para __iomem *usbpram;
2332 	unsigned int i;
2333 	u64 size;
2334 	u32 offset;
2335 
2336 	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
2337 	if (!udc)
2338 		goto cleanup;
2339 
2340 	udc->dev = &ofdev->dev;
2341 
2342 	/* get the offset of the USB parameter RAM in MURAM from the device tree */
2343 	offset = *of_get_address(np, 1, &size, NULL);
2344 	udc->usb_param = cpm_muram_addr(offset);
2345 	memset_io(udc->usb_param, 0, size);
2346 
2347 	usbpram = udc->usb_param;
2348 	out_be16(&usbpram->frame_n, 0);
2349 	out_be32(&usbpram->rstate, 0);
2350 
2351 	tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
2352 					sizeof(struct usb_ep_para)),
2353 					   USB_EP_PARA_ALIGNMENT);
2354 	if (IS_ERR_VALUE(tmp_addr))
2355 		goto cleanup;
2356 
2357 	for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2358 		out_be16(&usbpram->epptr[i], (u16)tmp_addr);
2359 		udc->ep_param[i] = cpm_muram_addr(tmp_addr);
2360 		tmp_addr += 32;
2361 	}
2362 
2363 	memset_io(udc->ep_param[0], 0,
2364 			USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
2365 
2366 	udc->resume_state = USB_STATE_NOTATTACHED;
2367 	udc->usb_state = USB_STATE_POWERED;
2368 	udc->ep0_dir = 0;
2369 
2370 	spin_lock_init(&udc->lock);
2371 	return udc;
2372 
2373 cleanup:
2374 	kfree(udc);
2375 	return NULL;
2376 }
2377 
2378 /* USB Controller register init */
2379 static int qe_udc_reg_init(struct qe_udc *udc)
2380 {
2381 	struct usb_ctlr __iomem *qe_usbregs;
2382 	qe_usbregs = udc->usb_regs;
2383 
2384 	/* Spec says that we must enable the USB controller to change mode. */
2385 	out_8(&qe_usbregs->usb_usmod, 0x01);
2386 	/* Mode changed, now disable it, since muram isn't initialized yet. */
2387 	out_8(&qe_usbregs->usb_usmod, 0x00);
2388 
2389 	/* Initialize the rest. */
2390 	out_be16(&qe_usbregs->usb_usbmr, 0);
2391 	out_8(&qe_usbregs->usb_uscom, 0);
2392 	out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
2393 
2394 	return 0;
2395 }
2396 
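/*
 * Fill in the static part of a qe_ep: name, capabilities, ops and the
 * idle defaults.  Hardware-dependent fields are set later, in
 * qe_ep_init(), once the endpoint descriptor is known.
 */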
2397 static int qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
2398 {
2399 	struct qe_ep *ep = &udc->eps[pipe_num];
2400 
2401 	ep->udc = udc;
2402 	strcpy(ep->name, ep_name[pipe_num]);
2403 	ep->ep.name = ep_name[pipe_num];
2404 
2405 	if (pipe_num == 0) {
2406 		ep->ep.caps.type_control = true;
2407 	} else {
2408 		ep->ep.caps.type_iso = true;
2409 		ep->ep.caps.type_bulk = true;
2410 		ep->ep.caps.type_int = true;
2411 	}
2412 
2413 	ep->ep.caps.dir_in = true;
2414 	ep->ep.caps.dir_out = true;
2415 
2416 	ep->ep.ops = &qe_ep_ops;
2417 	ep->stopped = 1;
2418 	usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
2419 	ep->ep.desc = NULL;
2420 	ep->dir = 0xff;
2421 	ep->epnum = (u8)pipe_num;
2422 	ep->sent = 0;
2423 	ep->last = 0;
2424 	ep->init = 0;
2425 	ep->rxframe = NULL;
2426 	ep->txframe = NULL;
2427 	ep->tx_req = NULL;
2428 	ep->state = EP_STATE_IDLE;
2429 	ep->has_data = 0;
2430 
2431 	/* the queue lists any req for this ep */
2432 	INIT_LIST_HEAD(&ep->queue);
2433 
2434 	/* gadget.ep_list is used by ep autoconfig, so ep0 is not added */
2435 	if (pipe_num != 0)
2436 		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2437 
2438 	ep->gadget = &udc->gadget;
2439 
2440 	return 0;
2441 }
2442 
2443 /*-----------------------------------------------------------------------
2444  *	UDC device Driver operation functions				*
2445  *----------------------------------------------------------------------*/
2446 static void qe_udc_release(struct device *dev)
2447 {
2448 	struct qe_udc *udc = container_of(dev, struct qe_udc, gadget.dev);
2449 	int i;
2450 
2451 	complete(udc->done);
2452 	cpm_muram_free(cpm_muram_offset(udc->ep_param[0]));
2453 	for (i = 0; i < USB_MAX_ENDPOINTS; i++)
2454 		udc->ep_param[i] = NULL;
2455 
2456 	kfree(udc);
2457 }
2458 
2459 /* Driver probe functions */
2460 static const struct of_device_id qe_udc_match[];
2461 static int qe_udc_probe(struct platform_device *ofdev)
2462 {
2463 	struct qe_udc *udc;
2464 	const struct of_device_id *match;
2465 	struct device_node *np = ofdev->dev.of_node;
2466 	struct qe_ep *ep;
2467 	int ret = 0;
2468 	unsigned int i;
2469 	const void *prop;
2470 
2471 	match = of_match_device(qe_udc_match, &ofdev->dev);
2472 	if (!match)
2473 		return -EINVAL;
2474 
2475 	prop = of_get_property(np, "mode", NULL);
2476 	if (!prop || strcmp(prop, "peripheral"))
2477 		return -ENODEV;
2478 
2479 	/* Initialize the udc structure, including the QH and other members */
2480 	udc = qe_udc_config(ofdev);
2481 	if (!udc) {
2482 		dev_err(&ofdev->dev, "failed to initialize\n");
2483 		return -ENOMEM;
2484 	}
2485 
2486 	udc->soc_type = (unsigned long)match->data;
2487 	udc->usb_regs = of_iomap(np, 0);
2488 	if (!udc->usb_regs) {
2489 		ret = -ENOMEM;
2490 		goto err1;
2491 	}
2492 
2493 	/* initialize USB hw registers except for the EP registers,
2494 	 * leave the usbintr register untouched */
2495 	qe_udc_reg_init(udc);
2496 
2497 	/* here come the standard probe operations:
2498 	 * set up the qe_udc->gadget.xxx fields */
2499 	udc->gadget.ops = &qe_gadget_ops;
2500 
2501 	/* gadget.ep0 is a pointer */
2502 	udc->gadget.ep0 = &udc->eps[0].ep;
2503 
2504 	INIT_LIST_HEAD(&udc->gadget.ep_list);
2505 
2506 	/* modified during gadget registration */
2507 	udc->gadget.speed = USB_SPEED_UNKNOWN;
2508 
2509 	/* name: Identifies the controller hardware type. */
2510 	udc->gadget.name = driver_name;
2511 	udc->gadget.dev.parent = &ofdev->dev;
2512 
2513 	/* initialize qe_ep struct */
2514 	for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
2515 		/* because the ep type isn't decided here,
2516 		 * qe_ep_init() should be called in ep_enable() */
2517 
2518 		/* set up the qe_ep struct and link ep.ep.ep_list
2519 		 * into gadget.ep_list */
2520 		qe_ep_config(udc, (unsigned char)i);
2521 	}
2522 
2523 	/* ep0 is initialized here */
2524 	ret = qe_ep_init(udc, 0, &qe_ep0_desc);
2525 	if (ret)
2526 		goto err2;
2527 
2528 	/* create a buffer for sending ZLPs; it needs to remain zeroed */
2529 	udc->nullbuf = devm_kzalloc(&ofdev->dev, 256, GFP_KERNEL);
2530 	if (udc->nullbuf == NULL) {
2531 		ret = -ENOMEM;
2532 		goto err3;
2533 	}
2534 
2535 	/* buffer for data of get_status request */
2536 	udc->statusbuf = devm_kzalloc(&ofdev->dev, 2, GFP_KERNEL);
2537 	if (udc->statusbuf == NULL) {
2538 		ret = -ENOMEM;
2539 		goto err3;
2540 	}
2541 
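	/*
	 * Obtain a DMA address for the ZLP buffer: fall back to
	 * dma_map_single() when virt_to_phys() yields DMA_ADDR_INVALID,
	 * otherwise just sync the buffer for the device.
	 */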
2542 	udc->nullp = virt_to_phys((void *)udc->nullbuf);
2543 	if (udc->nullp == DMA_ADDR_INVALID) {
2544 		udc->nullp = dma_map_single(
2545 					udc->gadget.dev.parent,
2546 					udc->nullbuf,
2547 					256,
2548 					DMA_TO_DEVICE);
2549 		udc->nullmap = 1;
2550 	} else {
2551 		dma_sync_single_for_device(udc->gadget.dev.parent,
2552 					udc->nullp, 256,
2553 					DMA_TO_DEVICE);
2554 	}
2555 
2556 	tasklet_setup(&udc->rx_tasklet, ep_rx_tasklet);
2557 	/* request irq and disable DR  */
2558 	udc->usb_irq = irq_of_parse_and_map(np, 0);
2559 	if (!udc->usb_irq) {
2560 		ret = -EINVAL;
2561 		goto err_noirq;
2562 	}
2563 
2564 	ret = request_irq(udc->usb_irq, qe_udc_irq, 0,
2565 				driver_name, udc);
2566 	if (ret) {
2567 		dev_err(udc->dev, "cannot request irq %d err %d\n",
2568 				udc->usb_irq, ret);
2569 		goto err4;
2570 	}
2571 
2572 	ret = usb_add_gadget_udc_release(&ofdev->dev, &udc->gadget,
2573 			qe_udc_release);
2574 	if (ret)
2575 		goto err5;
2576 
2577 	platform_set_drvdata(ofdev, udc);
2578 	dev_info(udc->dev,
2579 			"%s USB controller initialized as device\n",
2580 			(udc->soc_type == PORT_QE) ? "QE" : "CPM");
2581 	return 0;
2582 
2583 err5:
2584 	free_irq(udc->usb_irq, udc);
2585 err4:
2586 	irq_dispose_mapping(udc->usb_irq);
2587 err_noirq:
2588 	if (udc->nullmap) {
2589 		dma_unmap_single(udc->gadget.dev.parent,
2590 			udc->nullp, 256,
2591 				DMA_TO_DEVICE);
2592 			udc->nullp = DMA_ADDR_INVALID;
2593 	} else {
2594 		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2595 			udc->nullp, 256,
2596 				DMA_TO_DEVICE);
2597 	}
2598 err3:
2599 	ep = &udc->eps[0];
2600 	cpm_muram_free(cpm_muram_offset(ep->rxbase));
2601 	kfree(ep->rxframe);
2602 	kfree(ep->rxbuffer);
2603 	kfree(ep->txframe);
2604 err2:
2605 	iounmap(udc->usb_regs);
2606 err1:
2607 	kfree(udc);
2608 	return ret;
2609 }
2610 
2611 #ifdef CONFIG_PM
2612 static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
2613 {
2614 	return -ENOTSUPP;
2615 }
2616 
2617 static int qe_udc_resume(struct platform_device *dev)
2618 {
2619 	return -ENOTSUPP;
2620 }
2621 #endif
2622 
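/*
 * Tear down in roughly the reverse order of probe: unregister the
 * gadget, unmap or sync the DMA buffers, free the ep0 rings and
 * buffers, release the interrupt and the register mapping, and finally
 * wait for qe_udc_release() before returning.
 */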
2623 static int qe_udc_remove(struct platform_device *ofdev)
2624 {
2625 	struct qe_udc *udc = platform_get_drvdata(ofdev);
2626 	struct qe_ep *ep;
2627 	unsigned int size;
2628 	DECLARE_COMPLETION_ONSTACK(done);
2629 
2630 	usb_del_gadget_udc(&udc->gadget);
2631 
2632 	udc->done = &done;
2633 	tasklet_disable(&udc->rx_tasklet);
2634 
2635 	if (udc->nullmap) {
2636 		dma_unmap_single(udc->gadget.dev.parent,
2637 			udc->nullp, 256,
2638 				DMA_TO_DEVICE);
2639 			udc->nullp = DMA_ADDR_INVALID;
2640 	} else {
2641 		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2642 			udc->nullp, 256,
2643 				DMA_TO_DEVICE);
2644 	}
2645 
2646 	ep = &udc->eps[0];
2647 	cpm_muram_free(cpm_muram_offset(ep->rxbase));
2648 	size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);
2649 
2650 	kfree(ep->rxframe);
2651 	if (ep->rxbufmap) {
2652 		dma_unmap_single(udc->gadget.dev.parent,
2653 				ep->rxbuf_d, size,
2654 				DMA_FROM_DEVICE);
2655 		ep->rxbuf_d = DMA_ADDR_INVALID;
2656 	} else {
2657 		dma_sync_single_for_cpu(udc->gadget.dev.parent,
2658 				ep->rxbuf_d, size,
2659 				DMA_FROM_DEVICE);
2660 	}
2661 
2662 	kfree(ep->rxbuffer);
2663 	kfree(ep->txframe);
2664 
2665 	free_irq(udc->usb_irq, udc);
2666 	irq_dispose_mapping(udc->usb_irq);
2667 
2668 	tasklet_kill(&udc->rx_tasklet);
2669 
2670 	iounmap(udc->usb_regs);
2671 
2672 	/* wait for release() of gadget.dev to free udc */
2673 	wait_for_completion(&done);
2674 
2675 	return 0;
2676 }
2677 
2678 /*-------------------------------------------------------------------------*/
2679 static const struct of_device_id qe_udc_match[] = {
2680 	{
2681 		.compatible = "fsl,mpc8323-qe-usb",
2682 		.data = (void *)PORT_QE,
2683 	},
2684 	{
2685 		.compatible = "fsl,mpc8360-qe-usb",
2686 		.data = (void *)PORT_QE,
2687 	},
2688 	{
2689 		.compatible = "fsl,mpc8272-cpm-usb",
2690 		.data = (void *)PORT_CPM,
2691 	},
2692 	{},
2693 };
2694 
2695 MODULE_DEVICE_TABLE(of, qe_udc_match);
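/*
 * Illustrative sketch of a matching device tree node.  Register offsets,
 * sizes and interrupt specifiers below are placeholders that depend on
 * the board; only the compatible string and mode = "peripheral" are
 * checked by this driver:
 *
 *	usb@6c0 {
 *		compatible = "fsl,mpc8360-qe-usb", "fsl,mpc8323-qe-usb";
 *		reg = <0x6c0 0x40 0x8b00 0x100>;
 *		interrupts = <11>;
 *		interrupt-parent = <&qeic>;
 *		mode = "peripheral";
 *	};
 *
 * The driver maps the first reg entry as the controller registers and
 * treats the second as the USB parameter RAM offset in MURAM.
 */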
2696 
2697 static struct platform_driver udc_driver = {
2698 	.driver = {
2699 		.name = driver_name,
2700 		.of_match_table = qe_udc_match,
2701 	},
2702 	.probe          = qe_udc_probe,
2703 	.remove         = qe_udc_remove,
2704 #ifdef CONFIG_PM
2705 	.suspend        = qe_udc_suspend,
2706 	.resume         = qe_udc_resume,
2707 #endif
2708 };
2709 
2710 module_platform_driver(udc_driver);
2711 
2712 MODULE_DESCRIPTION(DRIVER_DESC);
2713 MODULE_AUTHOR(DRIVER_AUTHOR);
2714 MODULE_LICENSE("GPL");
2715