xref: /linux/drivers/staging/gdm724x/gdm_usb.c (revision 44f57d78)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. */
3 
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/usb.h>
9 #include <linux/sched.h>
10 #include <linux/kthread.h>
11 #include <linux/usb/cdc.h>
12 #include <linux/wait.h>
13 #include <linux/if_ether.h>
14 #include <linux/pm_runtime.h>
15 
16 #include "gdm_usb.h"
17 #include "gdm_lte.h"
18 #include "hci.h"
19 #include "hci_packet.h"
20 #include "gdm_endian.h"
21 
/* Match a specific VID/PID that exposes a CDC Ethernet control interface */
#define USB_DEVICE_CDC_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		USB_DEVICE_ID_MATCH_INT_CLASS | \
		USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceClass = USB_CLASS_COMM,\
	.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET
30 
/* Match a specific VID/PID that enumerates as SCSI bulk-only mass storage
 * (unused in id_table below; kept for devices that boot in storage mode)
 */
#define USB_DEVICE_MASS_DATA(vid, pid) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
		USB_DEVICE_ID_MATCH_INT_INFO,\
	.idVendor = vid,\
	.idProduct = pid,\
	.bInterfaceSubClass = USB_SC_SCSI, \
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,\
	.bInterfaceProtocol = USB_PR_BULK
39 
/* Devices handled by this driver: the CDC Ethernet interfaces of the
 * GCT GDM7240 and GDM7243 LTE modems.
 */
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
	{ USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
	{ }
};

MODULE_DEVICE_TABLE(usb, id_table);
47 
48 static void do_tx(struct work_struct *work);
49 static void do_rx(struct work_struct *work);
50 
51 static int gdm_usb_recv(void *priv_dev,
52 			int (*cb)(void *cb_data,
53 				  void *data, int len, int context),
54 			void *cb_data,
55 			int context);
56 
57 static int request_mac_address(struct lte_udev *udev)
58 {
59 	u8 buf[16] = {0,};
60 	struct hci_packet *hci = (struct hci_packet *)buf;
61 	struct usb_device *usbdev = udev->usbdev;
62 	int actual;
63 	int ret = -1;
64 
65 	hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION);
66 	hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1);
67 	hci->data[0] = MAC_ADDRESS;
68 
69 	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
70 			   &actual, 1000);
71 
72 	udev->request_mac_addr = 1;
73 
74 	return ret;
75 }
76 
77 static struct usb_tx *alloc_tx_struct(int len)
78 {
79 	struct usb_tx *t = NULL;
80 	int ret = 0;
81 
82 	t = kzalloc(sizeof(*t), GFP_ATOMIC);
83 	if (!t) {
84 		ret = -ENOMEM;
85 		goto out;
86 	}
87 
88 	t->urb = usb_alloc_urb(0, GFP_ATOMIC);
89 	if (!(len % 512))
90 		len++;
91 
92 	t->buf = kmalloc(len, GFP_ATOMIC);
93 	if (!t->urb || !t->buf) {
94 		ret = -ENOMEM;
95 		goto out;
96 	}
97 
98 out:
99 	if (ret < 0) {
100 		if (t) {
101 			usb_free_urb(t->urb);
102 			kfree(t->buf);
103 			kfree(t);
104 		}
105 		return NULL;
106 	}
107 
108 	return t;
109 }
110 
111 static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
112 {
113 	struct usb_tx_sdu *t_sdu;
114 
115 	t_sdu = kzalloc(sizeof(*t_sdu), GFP_KERNEL);
116 	if (!t_sdu)
117 		return NULL;
118 
119 	t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_KERNEL);
120 	if (!t_sdu->buf) {
121 		kfree(t_sdu);
122 		return NULL;
123 	}
124 
125 	return t_sdu;
126 }
127 
128 static void free_tx_struct(struct usb_tx *t)
129 {
130 	if (t) {
131 		usb_free_urb(t->urb);
132 		kfree(t->buf);
133 		kfree(t);
134 	}
135 }
136 
137 static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
138 {
139 	if (t_sdu) {
140 		kfree(t_sdu->buf);
141 		kfree(t_sdu);
142 	}
143 }
144 
145 static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
146 {
147 	struct usb_tx_sdu *t_sdu;
148 
149 	if (list_empty(&tx->free_list))
150 		return NULL;
151 
152 	t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
153 	list_del(&t_sdu->list);
154 
155 	tx->avail_count--;
156 
157 	*no_spc = list_empty(&tx->free_list) ? 1 : 0;
158 
159 	return t_sdu;
160 }
161 
162 static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
163 {
164 	list_add_tail(&t_sdu->list, &tx->free_list);
165 	tx->avail_count++;
166 }
167 
168 static struct usb_rx *alloc_rx_struct(void)
169 {
170 	struct usb_rx *r = NULL;
171 	int ret = 0;
172 
173 	r = kmalloc(sizeof(*r), GFP_KERNEL);
174 	if (!r) {
175 		ret = -ENOMEM;
176 		goto out;
177 	}
178 
179 	r->urb = usb_alloc_urb(0, GFP_KERNEL);
180 	r->buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
181 	if (!r->urb || !r->buf) {
182 		ret = -ENOMEM;
183 		goto out;
184 	}
185 out:
186 
187 	if (ret < 0) {
188 		if (r) {
189 			usb_free_urb(r->urb);
190 			kfree(r->buf);
191 			kfree(r);
192 		}
193 		return NULL;
194 	}
195 
196 	return r;
197 }
198 
199 static void free_rx_struct(struct usb_rx *r)
200 {
201 	if (r) {
202 		usb_free_urb(r->urb);
203 		kfree(r->buf);
204 		kfree(r);
205 	}
206 }
207 
208 static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
209 {
210 	struct usb_rx *r;
211 	unsigned long flags;
212 
213 	spin_lock_irqsave(&rx->rx_lock, flags);
214 
215 	if (list_empty(&rx->free_list)) {
216 		spin_unlock_irqrestore(&rx->rx_lock, flags);
217 		return NULL;
218 	}
219 
220 	r = list_entry(rx->free_list.next, struct usb_rx, free_list);
221 	list_del(&r->free_list);
222 
223 	rx->avail_count--;
224 
225 	*no_spc = list_empty(&rx->free_list) ? 1 : 0;
226 
227 	spin_unlock_irqrestore(&rx->rx_lock, flags);
228 
229 	return r;
230 }
231 
232 static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
233 {
234 	unsigned long flags;
235 
236 	spin_lock_irqsave(&rx->rx_lock, flags);
237 
238 	list_add_tail(&r->free_list, &rx->free_list);
239 	rx->avail_count++;
240 
241 	spin_unlock_irqrestore(&rx->rx_lock, flags);
242 }
243 
/* Free every TX/RX resource owned by @udev.
 *
 * Called on probe failure and on disconnect. Empties the TX SDU, HCI
 * and free lists, kills any RX URBs still in flight, then frees the RX
 * free pool and any completed buffers this device queued for the host.
 */
static void release_usb(struct lte_udev *udev)
{
	struct rx_cxt	*rx = &udev->rx;
	struct tx_cxt	*tx = &udev->tx;
	struct usb_tx	*t, *t_next;
	struct usb_rx	*r, *r_next;
	struct usb_tx_sdu	*t_sdu, *t_sdu_next;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}

	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
		list_del(&t->list);
		free_tx_struct(t);
	}

	list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
		list_del(&t_sdu->list);
		free_tx_sdu_struct(t_sdu);
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	spin_lock_irqsave(&rx->submit_lock, flags);
	/* usb_kill_urb() can sleep, so the lock is dropped around it.
	 * NOTE(review): the cached next pointer may go stale while the
	 * lock is released; restarting from the list head after each
	 * kill would be safer - confirm before relying on this loop.
	 */
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	spin_lock_irqsave(&rx->rx_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
		list_del(&r->free_list);
		free_rx_struct(r);
	}
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	spin_lock_irqsave(&rx->to_host_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
		/* only free entries queued by this device instance */
		if (r->index == (void *)udev) {
			list_del(&r->to_host_list);
			free_rx_struct(r);
		}
	}
	spin_unlock_irqrestore(&rx->to_host_lock, flags);
}
295 
/* Initialize the TX/RX contexts of @udev.
 *
 * Sets up all lists, locks and state flags, then pre-allocates
 * MAX_NUM_SDU_BUF TX SDU buffers and MAX_RX_SUBMIT_COUNT * 2 RX
 * buffers. On any allocation failure everything allocated so far is
 * torn down via release_usb().
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int init_usb(struct lte_udev *udev)
{
	int ret = 0;
	int i;
	struct tx_cxt *tx = &udev->tx;
	struct rx_cxt *rx = &udev->rx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct usb_rx *r = NULL;

	udev->send_complete = 1;
	udev->tx_stop = 0;
	udev->request_mac_addr = 0;
	udev->usb_state = PM_NORMAL;

	INIT_LIST_HEAD(&tx->sdu_list);
	INIT_LIST_HEAD(&tx->hci_list);
	INIT_LIST_HEAD(&tx->free_list);
	INIT_LIST_HEAD(&rx->rx_submit_list);
	INIT_LIST_HEAD(&rx->free_list);
	INIT_LIST_HEAD(&rx->to_host_list);
	spin_lock_init(&tx->lock);
	spin_lock_init(&rx->rx_lock);
	spin_lock_init(&rx->submit_lock);
	spin_lock_init(&rx->to_host_lock);

	tx->avail_count = 0;
	rx->avail_count = 0;

	udev->rx_cb = NULL;

	/* pre-fill the TX SDU free pool */
	for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
		t_sdu = alloc_tx_sdu_struct();
		if (!t_sdu) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&t_sdu->list, &tx->free_list);
		tx->avail_count++;
	}

	/* pre-fill the RX free pool (twice the submit depth) */
	for (i = 0; i < MAX_RX_SUBMIT_COUNT * 2; i++) {
		r = alloc_rx_struct();
		if (!r) {
			ret = -ENOMEM;
			goto fail;
		}

		list_add(&r->free_list, &rx->free_list);
		rx->avail_count++;
	}
	INIT_DELAYED_WORK(&udev->work_tx, do_tx);
	INIT_DELAYED_WORK(&udev->work_rx, do_rx);
	return 0;
fail:
	release_usb(udev);
	return ret;
}
354 
355 static int set_mac_address(u8 *data, void *arg)
356 {
357 	struct phy_dev *phy_dev = arg;
358 	struct lte_udev *udev = phy_dev->priv_dev;
359 	struct tlv *tlv = (struct tlv *)data;
360 	u8 mac_address[ETH_ALEN] = {0, };
361 
362 	if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
363 		memcpy(mac_address, tlv->data, tlv->len);
364 
365 		if (register_lte_device(phy_dev,
366 					&udev->intf->dev, mac_address) < 0)
367 			pr_err("register lte device failed\n");
368 
369 		udev->request_mac_addr = 0;
370 
371 		return 1;
372 	}
373 
374 	return 0;
375 }
376 
377 static void do_rx(struct work_struct *work)
378 {
379 	struct lte_udev *udev =
380 		container_of(work, struct lte_udev, work_rx.work);
381 	struct rx_cxt *rx = &udev->rx;
382 	struct usb_rx *r;
383 	struct hci_packet *hci;
384 	struct phy_dev *phy_dev;
385 	u16 cmd_evt;
386 	int ret;
387 	unsigned long flags;
388 
389 	while (1) {
390 		spin_lock_irqsave(&rx->to_host_lock, flags);
391 		if (list_empty(&rx->to_host_list)) {
392 			spin_unlock_irqrestore(&rx->to_host_lock, flags);
393 			break;
394 		}
395 		r = list_entry(rx->to_host_list.next,
396 			       struct usb_rx, to_host_list);
397 		list_del(&r->to_host_list);
398 		spin_unlock_irqrestore(&rx->to_host_lock, flags);
399 
400 		phy_dev = r->cb_data;
401 		udev = phy_dev->priv_dev;
402 		hci = (struct hci_packet *)r->buf;
403 		cmd_evt = gdm_dev16_to_cpu(udev->gdm_ed, hci->cmd_evt);
404 
405 		switch (cmd_evt) {
406 		case LTE_GET_INFORMATION_RESULT:
407 			if (set_mac_address(hci->data, r->cb_data) == 0) {
408 				r->callback(r->cb_data,
409 					    r->buf,
410 					    r->urb->actual_length,
411 					    KERNEL_THREAD);
412 			}
413 			break;
414 
415 		default:
416 			if (r->callback) {
417 				ret = r->callback(r->cb_data,
418 						  r->buf,
419 						  r->urb->actual_length,
420 						  KERNEL_THREAD);
421 
422 				if (ret == -EAGAIN)
423 					pr_err("failed to send received data\n");
424 			}
425 			break;
426 		}
427 
428 		put_rx_struct(rx, r);
429 
430 		gdm_usb_recv(udev,
431 			     r->callback,
432 			     r->cb_data,
433 			     USB_COMPLETE);
434 	}
435 }
436 
437 static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
438 {
439 	unsigned long flags;
440 	struct usb_rx	*r_remove, *r_remove_next;
441 
442 	spin_lock_irqsave(&rx->submit_lock, flags);
443 	list_for_each_entry_safe(r_remove, r_remove_next,
444 				 &rx->rx_submit_list, rx_submit_list) {
445 		if (r == r_remove) {
446 			list_del(&r->rx_submit_list);
447 			break;
448 		}
449 	}
450 	spin_unlock_irqrestore(&rx->submit_lock, flags);
451 }
452 
/* URB completion handler for bulk-IN receives (interrupt context).
 *
 * On success the buffer is queued on rx->to_host_list and the rx work
 * is scheduled to process it; on error the buffer goes straight back
 * to the free pool.
 */
static void gdm_usb_rcv_complete(struct urb *urb)
{
	struct usb_rx *r = urb->context;
	struct rx_cxt *rx = r->rx;
	unsigned long flags;
	struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
	struct usb_device *usbdev = udev->usbdev;

	/* the URB is no longer in flight */
	remove_rx_submit_list(r, rx);

	if (!urb->status && r->callback) {
		spin_lock_irqsave(&rx->to_host_lock, flags);
		list_add_tail(&r->to_host_list, &rx->to_host_list);
		/* scheduled under the lock so queueing and scheduling
		 * appear atomic to do_rx()
		 */
		schedule_work(&udev->work_rx.work);
		spin_unlock_irqrestore(&rx->to_host_lock, flags);
	} else {
		/* suppress error spam while suspending/resuming */
		if (urb->status && udev->usb_state == PM_NORMAL)
			dev_err(&urb->dev->dev, "%s: urb status error %d\n",
				__func__, urb->status);

		put_rx_struct(rx, r);
	}

	/* keep autosuspend from triggering while traffic flows */
	usb_mark_last_busy(usbdev);
}
478 
479 static int gdm_usb_recv(void *priv_dev,
480 			int (*cb)(void *cb_data,
481 				  void *data, int len, int context),
482 			void *cb_data,
483 			int context)
484 {
485 	struct lte_udev *udev = priv_dev;
486 	struct usb_device *usbdev = udev->usbdev;
487 	struct rx_cxt *rx = &udev->rx;
488 	struct usb_rx *r;
489 	int no_spc;
490 	int ret;
491 	unsigned long flags;
492 
493 	if (!udev->usbdev) {
494 		pr_err("invalid device\n");
495 		return -ENODEV;
496 	}
497 
498 	r = get_rx_struct(rx, &no_spc);
499 	if (!r) {
500 		pr_err("Out of Memory\n");
501 		return -ENOMEM;
502 	}
503 
504 	udev->rx_cb = cb;
505 	r->callback = cb;
506 	r->cb_data = cb_data;
507 	r->index = (void *)udev;
508 	r->rx = rx;
509 
510 	usb_fill_bulk_urb(r->urb,
511 			  usbdev,
512 			  usb_rcvbulkpipe(usbdev, 0x83),
513 			  r->buf,
514 			  RX_BUF_SIZE,
515 			  gdm_usb_rcv_complete,
516 			  r);
517 
518 	spin_lock_irqsave(&rx->submit_lock, flags);
519 	list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
520 	spin_unlock_irqrestore(&rx->submit_lock, flags);
521 
522 	if (context == KERNEL_THREAD)
523 		ret = usb_submit_urb(r->urb, GFP_KERNEL);
524 	else
525 		ret = usb_submit_urb(r->urb, GFP_ATOMIC);
526 
527 	if (ret) {
528 		spin_lock_irqsave(&rx->submit_lock, flags);
529 		list_del(&r->rx_submit_list);
530 		spin_unlock_irqrestore(&rx->submit_lock, flags);
531 
532 		pr_err("usb_submit_urb failed (%p)\n", r);
533 		put_rx_struct(rx, r);
534 	}
535 
536 	return ret;
537 }
538 
/* URB completion handler for bulk-OUT transmits (interrupt context).
 *
 * Invokes the sender's callback, frees the TX container and kicks the
 * tx work so the next queued packet can go out.
 *
 * NOTE(review): on -ECONNRESET (URB unlinked) the function returns
 * without freeing t, which looks like a leak of the usb_tx container -
 * confirm whether the unlink path releases it elsewhere.
 */
static void gdm_usb_send_complete(struct urb *urb)
{
	struct usb_tx *t = urb->context;
	struct tx_cxt *tx = t->tx;
	struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
	unsigned long flags;

	if (urb->status == -ECONNRESET) {
		dev_info(&urb->dev->dev, "CONNRESET\n");
		return;
	}

	if (t->callback)
		t->callback(t->cb_data);

	free_tx_struct(t);

	spin_lock_irqsave(&tx->lock, flags);
	/* allow do_tx() to start the next transfer */
	udev->send_complete = 1;
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);
}
561 
562 static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
563 {
564 	int ret = 0;
565 
566 	if (!(len % 512))
567 		len++;
568 
569 	usb_fill_bulk_urb(t->urb,
570 			  usbdev,
571 			  usb_sndbulkpipe(usbdev, 2),
572 			  t->buf,
573 			  len,
574 			  gdm_usb_send_complete,
575 			  t);
576 
577 	ret = usb_submit_urb(t->urb, GFP_ATOMIC);
578 
579 	if (ret)
580 		dev_err(&usbdev->dev, "usb_submit_urb failed: %d\n",
581 			ret);
582 
583 	usb_mark_last_busy(usbdev);
584 
585 	return ret;
586 }
587 
/* Aggregate queued SDUs into a single LTE_TX_MULTI_SDU frame.
 *
 * Pops entries from tx->sdu_list into @send_buf until either
 * MAX_PACKET_IN_MULTI_SDU packets are packed or the next SDU would
 * push the payload past MAX_SDU_SIZE.
 *
 * Return: total frame length in bytes (multi-SDU header + payload).
 */
static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
{
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu = NULL;
	struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
	u16 send_len = 0;
	u16 num_packet = 0;
	unsigned long flags;

	multi_sdu->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_TX_MULTI_SDU);

	while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
		spin_lock_irqsave(&tx->lock, flags);
		if (list_empty(&tx->sdu_list)) {
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
		if (send_len + t_sdu->len > MAX_SDU_SIZE) {
			/* next SDU does not fit; send what we have */
			spin_unlock_irqrestore(&tx->lock, flags);
			break;
		}

		list_del(&t_sdu->list);
		spin_unlock_irqrestore(&tx->lock, flags);

		memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);

		/* each SDU is padded to a 4-byte boundary in the frame */
		send_len += (t_sdu->len + 3) & 0xfffc;
		num_packet++;

		/* NOTE(review): the completion callback is only invoked
		 * while more than 10 free SDU slots remain - presumably
		 * a flow-control throttle; confirm callbacks are not
		 * lost when the pool runs low.
		 */
		if (tx->avail_count > 10)
			t_sdu->callback(t_sdu->cb_data);

		spin_lock_irqsave(&tx->lock, flags);
		put_tx_struct(tx, t_sdu);
		spin_unlock_irqrestore(&tx->lock, flags);
	}

	multi_sdu->len = gdm_cpu_to_dev16(udev->gdm_ed, send_len);
	multi_sdu->num_packet = gdm_cpu_to_dev16(udev->gdm_ed, num_packet);

	return send_len + offsetof(struct multi_sdu, data);
}
633 
/* Work handler: transmit the next pending HCI packet or SDU batch.
 *
 * HCI control packets take priority over data SDUs. Only one URB is in
 * flight at a time, gated by udev->send_complete under tx->lock.
 */
static void do_tx(struct work_struct *work)
{
	struct lte_udev *udev =
		container_of(work, struct lte_udev, work_tx.work);
	struct usb_device *usbdev = udev->usbdev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx *t = NULL;
	int is_send = 0;
	u32 len = 0;
	unsigned long flags;

	/* wake the device; the PM reference is dropped right away since
	 * usb_mark_last_busy() keeps it awake while traffic flows
	 */
	if (!usb_autopm_get_interface(udev->intf))
		usb_autopm_put_interface(udev->intf);

	if (udev->usb_state == PM_SUSPEND)
		return;

	spin_lock_irqsave(&tx->lock, flags);
	if (!udev->send_complete) {
		/* a transmit is already in flight */
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	udev->send_complete = 0;

	if (!list_empty(&tx->hci_list)) {
		t = list_entry(tx->hci_list.next, struct usb_tx, list);
		list_del(&t->list);
		len = t->len;
		t->is_sdu = 0;
		is_send = 1;
	} else if (!list_empty(&tx->sdu_list)) {
		if (udev->tx_stop) {
			/* flow control: leave SDUs queued for later */
			udev->send_complete = 1;
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}

		t = alloc_tx_struct(TX_BUF_SIZE);
		if (!t) {
			spin_unlock_irqrestore(&tx->lock, flags);
			return;
		}
		t->callback = NULL;
		t->tx = tx;
		t->is_sdu = 1;
		is_send = 1;
	}

	if (!is_send) {
		udev->send_complete = 1;
		spin_unlock_irqrestore(&tx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	/* SDU batches are packed outside the lock */
	if (t->is_sdu)
		len = packet_aggregation(udev, t->buf);

	if (send_tx_packet(usbdev, t, len)) {
		pr_err("send_tx_packet failed\n");
		t->callback = NULL;
		/* fake a completion to free t and clear send_complete */
		gdm_usb_send_complete(t->urb);
	}
}
698 
/* per-SDU header overhead added in front of the payload */
#define SDU_PARAM_LEN 12

/* Queue one SDU for transmission.
 *
 * Copies @data into a pooled SDU buffer - stripping the Ethernet
 * header unless @nic_type is NIC_TYPE_ARP - fills in the HCI SDU
 * header and schedules the tx work.
 *
 * NOTE(review): @len is not validated against SDU_BUF_SIZE before the
 * memcpy; presumably callers cap it at the MTU - confirm.
 *
 * Return: 0 on success, TX_NO_DEV/TX_NO_SPC on failure, TX_NO_BUFFER
 * when the packet was queued but the pool is now exhausted.
 */
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
			    unsigned int dft_eps_ID, unsigned int eps_ID,
			    void (*cb)(void *data), void *cb_data,
			    int dev_idx, int nic_type)
{
	struct lte_udev *udev = priv_dev;
	struct tx_cxt *tx = &udev->tx;
	struct usb_tx_sdu *t_sdu;
	struct sdu *sdu = NULL;
	unsigned long flags;
	int no_spc = 0;
	u16 send_len;

	if (!udev->usbdev) {
		pr_err("sdu send - invalid device\n");
		return TX_NO_DEV;
	}

	spin_lock_irqsave(&tx->lock, flags);
	t_sdu = get_tx_sdu_struct(tx, &no_spc);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (!t_sdu) {
		pr_err("sdu send - free list empty\n");
		return TX_NO_SPC;
	}

	sdu = (struct sdu *)t_sdu->buf;
	sdu->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_TX_SDU);
	if (nic_type == NIC_TYPE_ARP) {
		/* ARP frames are sent verbatim, header included */
		send_len = len + SDU_PARAM_LEN;
		memcpy(sdu->data, data, len);
	} else {
		/* strip the Ethernet header for normal data frames */
		send_len = len - ETH_HLEN;
		send_len += SDU_PARAM_LEN;
		memcpy(sdu->data, data + ETH_HLEN, len - ETH_HLEN);
	}

	sdu->len = gdm_cpu_to_dev16(udev->gdm_ed, send_len);
	sdu->dft_eps_ID = gdm_cpu_to_dev32(udev->gdm_ed, dft_eps_ID);
	sdu->bearer_ID = gdm_cpu_to_dev32(udev->gdm_ed, eps_ID);
	sdu->nic_type = gdm_cpu_to_dev32(udev->gdm_ed, nic_type);

	t_sdu->len = send_len + HCI_HEADER_SIZE;
	t_sdu->callback = cb;
	t_sdu->cb_data = cb_data;

	spin_lock_irqsave(&tx->lock, flags);
	list_add_tail(&t_sdu->list, &tx->sdu_list);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	if (no_spc)
		return TX_NO_BUFFER;

	return 0;
}
757 
758 static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
759 			    void (*cb)(void *data), void *cb_data)
760 {
761 	struct lte_udev *udev = priv_dev;
762 	struct tx_cxt *tx = &udev->tx;
763 	struct usb_tx *t;
764 	unsigned long flags;
765 
766 	if (!udev->usbdev) {
767 		pr_err("hci send - invalid device\n");
768 		return -ENODEV;
769 	}
770 
771 	t = alloc_tx_struct(len);
772 	if (!t) {
773 		pr_err("hci_send - out of memory\n");
774 		return -ENOMEM;
775 	}
776 
777 	memcpy(t->buf, data, len);
778 	t->callback = cb;
779 	t->cb_data = cb_data;
780 	t->len = len;
781 	t->tx = tx;
782 	t->is_sdu = 0;
783 
784 	spin_lock_irqsave(&tx->lock, flags);
785 	list_add_tail(&t->list, &tx->hci_list);
786 	schedule_work(&udev->work_tx.work);
787 	spin_unlock_irqrestore(&tx->lock, flags);
788 
789 	return 0;
790 }
791 
792 static u8 gdm_usb_get_endian(void *priv_dev)
793 {
794 	struct lte_udev *udev = priv_dev;
795 
796 	return udev->gdm_ed;
797 }
798 
/* Probe: bind to the network interface of a GDM724x LTE modem.
 *
 * Allocates the phy_dev/lte_udev pair, wires the generic phy_dev ops
 * to the USB transport, initializes TX/RX pools, enables autosuspend,
 * selects device endianness by PID and kicks off the asynchronous
 * MAC-address query before starting the receive path.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int gdm_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int ret = 0;
	struct phy_dev *phy_dev = NULL;
	struct lte_udev *udev = NULL;
	u16 idVendor, idProduct;
	int bInterfaceNumber;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
	idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
	idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);

	pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);

	if (bInterfaceNumber > NETWORK_INTERFACE) {
		pr_info("not a network device\n");
		return -ENODEV;
	}

	phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
	if (!phy_dev)
		return -ENOMEM;

	udev = kzalloc(sizeof(*udev), GFP_KERNEL);
	if (!udev) {
		ret = -ENOMEM;
		goto err_udev;
	}

	/* wire the generic phy_dev ops to the USB transport */
	phy_dev->priv_dev = (void *)udev;
	phy_dev->send_hci_func = gdm_usb_hci_send;
	phy_dev->send_sdu_func = gdm_usb_sdu_send;
	phy_dev->rcv_func = gdm_usb_recv;
	phy_dev->get_endian = gdm_usb_get_endian;

	udev->usbdev = usbdev;
	ret = init_usb(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "init_usb func failed\n");
		goto err_init_usb;
	}
	udev->intf = intf;

	intf->needs_remote_wakeup = 1;
	usb_enable_autosuspend(usbdev);
	pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);

	/* List up hosts with big endians, otherwise,
	 * defaults to little endian
	 */
	if (idProduct == PID_GDM7243)
		udev->gdm_ed = ENDIANNESS_BIG;
	else
		udev->gdm_ed = ENDIANNESS_LITTLE;

	ret = request_mac_address(udev);
	if (ret < 0) {
		dev_err(intf->usb_dev, "request Mac address failed\n");
		goto err_mac_address;
	}

	start_rx_proc(phy_dev);
	/* reference dropped in gdm_usb_disconnect() */
	usb_get_dev(usbdev);
	usb_set_intfdata(intf, phy_dev);

	return 0;

err_mac_address:
	release_usb(udev);
err_init_usb:
	kfree(udev);
err_udev:
	kfree(phy_dev);

	return ret;
}
877 
878 static void gdm_usb_disconnect(struct usb_interface *intf)
879 {
880 	struct phy_dev *phy_dev;
881 	struct lte_udev *udev;
882 	struct usb_device *usbdev;
883 
884 	usbdev = interface_to_usbdev(intf);
885 	phy_dev = usb_get_intfdata(intf);
886 
887 	udev = phy_dev->priv_dev;
888 	unregister_lte_device(phy_dev);
889 
890 	release_usb(udev);
891 
892 	kfree(udev);
893 	udev = NULL;
894 
895 	kfree(phy_dev);
896 	phy_dev = NULL;
897 
898 	usb_put_dev(usbdev);
899 }
900 
/* Suspend: kill all in-flight RX URBs and cancel the tx/rx work.
 *
 * Return: 0 on success, -1 if the device is not in PM_NORMAL state.
 */
static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct rx_cxt *rx;
	struct usb_rx *r;
	struct usb_rx *r_next;
	unsigned long flags;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;
	if (udev->usb_state != PM_NORMAL) {
		dev_err(intf->usb_dev, "usb suspend - invalid state\n");
		return -1;
	}

	udev->usb_state = PM_SUSPEND;

	/* usb_kill_urb() can sleep, so the lock is dropped around it.
	 * NOTE(review): the saved next pointer may go stale while
	 * unlocked - same pattern as release_usb(); confirm safety.
	 */
	spin_lock_irqsave(&rx->submit_lock, flags);
	list_for_each_entry_safe(r, r_next, &rx->rx_submit_list,
				 rx_submit_list) {
		spin_unlock_irqrestore(&rx->submit_lock, flags);
		usb_kill_urb(r->urb);
		spin_lock_irqsave(&rx->submit_lock, flags);
	}
	spin_unlock_irqrestore(&rx->submit_lock, flags);

	cancel_work_sync(&udev->work_tx.work);
	cancel_work_sync(&udev->work_rx.work);

	return 0;
}
934 
/* Resume: refill the RX submit pipeline and restart TX.
 *
 * Re-submits receive URBs for every pool entry above the
 * MAX_RX_SUBMIT_COUNT watermark and kicks the tx work. Also used as
 * the reset_resume handler.
 *
 * Return: 0 on success, -1 if the device was not suspended.
 */
static int gdm_usb_resume(struct usb_interface *intf)
{
	struct phy_dev *phy_dev;
	struct lte_udev *udev;
	struct tx_cxt *tx;
	struct rx_cxt *rx;
	unsigned long flags;
	int issue_count;
	int i;

	phy_dev = usb_get_intfdata(intf);
	udev = phy_dev->priv_dev;
	rx = &udev->rx;

	if (udev->usb_state != PM_SUSPEND) {
		dev_err(intf->usb_dev, "usb resume - invalid state\n");
		return -1;
	}
	udev->usb_state = PM_NORMAL;

	spin_lock_irqsave(&rx->rx_lock, flags);
	issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
	spin_unlock_irqrestore(&rx->rx_lock, flags);

	if (issue_count >= 0) {
		for (i = 0; i < issue_count; i++)
			gdm_usb_recv(phy_dev->priv_dev,
				     udev->rx_cb,
				     phy_dev,
				     USB_COMPLETE);
	}

	tx = &udev->tx;
	spin_lock_irqsave(&tx->lock, flags);
	schedule_work(&udev->work_tx.work);
	spin_unlock_irqrestore(&tx->lock, flags);

	return 0;
}
974 
/* USB driver glue; reset_resume reuses the normal resume path */
static struct usb_driver gdm_usb_lte_driver = {
	.name = "gdm_lte",
	.probe = gdm_usb_probe,
	.disconnect = gdm_usb_disconnect,
	.id_table = id_table,
	.supports_autosuspend = 1,
	.suspend = gdm_usb_suspend,
	.resume = gdm_usb_resume,
	.reset_resume = gdm_usb_resume,
};
985 
986 static int __init gdm_usb_lte_init(void)
987 {
988 	if (gdm_lte_event_init() < 0) {
989 		pr_err("error creating event\n");
990 		return -1;
991 	}
992 
993 	return usb_register(&gdm_usb_lte_driver);
994 }
995 
996 static void __exit gdm_usb_lte_exit(void)
997 {
998 	gdm_lte_event_exit();
999 
1000 	usb_deregister(&gdm_usb_lte_driver);
1001 }
1002 
module_init(gdm_usb_lte_init);
module_exit(gdm_usb_lte_exit);

/* DRIVER_VERSION is defined in gdm_usb.h */
MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("GCT LTE USB Device Driver");
MODULE_LICENSE("GPL");
1009