xref: /linux/drivers/accel/qaic/qaic_drv.c (revision d642ef71)
// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <drm/drm_accel.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <uapi/drm/qaic_accel.h>

#include "mhi_controller.h"
#include "qaic.h"

MODULE_IMPORT_NS(DMA_BUF);

#define PCI_DEV_AIC100			0xa100
#define QAIC_NAME			"qaic"
#define QAIC_DESC			"Qualcomm Cloud AI Accelerators"
#define CNTL_MAJOR			5
#define CNTL_MINOR			0

bool datapath_polling;
module_param(datapath_polling, bool, 0400);
MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);

static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id);
static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id);

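/* Final kref release for a qaic_user - drop its SRCU state, IDA handle, and memory */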
static void free_usr(struct kref *kref)
{
	struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);

	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
	kfree(usr);
}

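/*
 * DRM open callback - allocate a qaic_user for this file descriptor and add
 * it to the device's user list, unless the device is currently in reset.
 */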
static int qaic_open(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_drm_device *qddev = to_qaic_drm_device(dev);
	struct qaic_device *qdev = qddev->qdev;
	struct qaic_user *usr;
	int rcu_id;
	int ret;

	rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->in_reset) {
		ret = -ENODEV;
		goto dev_unlock;
	}

	usr = kmalloc(sizeof(*usr), GFP_KERNEL);
	if (!usr) {
		ret = -ENOMEM;
		goto dev_unlock;
	}

	usr->handle = ida_alloc(&qaic_usrs, GFP_KERNEL);
	if (usr->handle < 0) {
		ret = usr->handle;
		goto free_usr;
	}
	usr->qddev = qddev;
	atomic_set(&usr->chunk_id, 0);
	init_srcu_struct(&usr->qddev_lock);
	kref_init(&usr->ref_count);

	ret = mutex_lock_interruptible(&qddev->users_mutex);
	if (ret)
		goto cleanup_usr;

	list_add(&usr->node, &qddev->users);
	mutex_unlock(&qddev->users_mutex);

	file->driver_priv = usr;

	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return 0;

cleanup_usr:
	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
free_usr:
	kfree(usr);
dev_unlock:
	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return ret;
}

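/*
 * DRM postclose callback - release everything the user owned (control
 * resources and any DMA bridge channels still attached to it), detach the
 * user from the device, and drop the final reference.
 */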
static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_user *usr = file->driver_priv;
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	int qdev_rcu_id;
	int usr_rcu_id;
	int i;

	qddev = usr->qddev;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (qddev) {
		qdev = qddev->qdev;
		qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
		if (!qdev->in_reset) {
			qaic_release_usr(qdev, usr);
			for (i = 0; i < qdev->num_dbc; ++i)
				if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
					release_dbc(qdev, i);
		}
		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);

		mutex_lock(&qddev->users_mutex);
		if (!list_empty(&usr->node))
			list_del_init(&usr->node);
		mutex_unlock(&qddev->users_mutex);
	}

	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	kref_put(&usr->ref_count, free_usr);

	file->driver_priv = NULL;
}

DEFINE_DRM_ACCEL_FOPS(qaic_accel_fops);

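/* Device-specific ioctls exposed through the accel node; see uapi/drm/qaic_accel.h */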
static const struct drm_ioctl_desc qaic_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QAIC_MANAGE, qaic_manage_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_CREATE_BO, qaic_create_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_MMAP_BO, qaic_mmap_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_ATTACH_SLICE_BO, qaic_attach_slice_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_EXECUTE_BO, qaic_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_DETACH_SLICE_BO, qaic_detach_slice_bo_ioctl, 0),
};

static const struct drm_driver qaic_accel_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.name			= QAIC_NAME,
	.desc			= QAIC_DESC,
	.date			= "20190618",

	.fops			= &qaic_accel_fops,
	.open			= qaic_open,
	.postclose		= qaic_postclose,

	.ioctls			= qaic_drm_ioctls,
	.num_ioctls		= ARRAY_SIZE(qaic_drm_ioctls),
	.gem_prime_import	= qaic_gem_prime_import,
};

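/*
 * Register the (already allocated) DRM device so userspace gets an accel node
 * for it. Called once the device-side firmware is ready to accept commands.
 */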
static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	int ret;

	/* Hold off implementing partitions until the uapi is determined */
	if (partition_id != QAIC_NO_PARTITION)
		return -EINVAL;

	qddev->partition_id = partition_id;

	/*
	 * drm_dev_unregister() sets the driver data to NULL and
	 * drm_dev_register() does not update the driver data. During a SoC
	 * reset the drm dev is unregistered and registered again, leaving the
	 * driver data NULL, so restore it here before registering.
	 */
	dev_set_drvdata(to_accel_kdev(qddev), drm->accel);
	ret = drm_dev_register(drm, 0);
	if (ret)
		pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);

	return ret;
}

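/*
 * Unregister the DRM device and evict every open user. New opens fail once
 * the device is unregistered; existing users see errors until they close
 * their FDs.
 */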
static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	struct qaic_user *usr;

	drm_dev_get(drm);
	drm_dev_unregister(drm);
	qddev->partition_id = 0;
	/*
	 * Existing users get unresolvable errors until they close their FDs.
	 * Need to sync carefully with users calling close(). The
	 * list of users can be modified elsewhere when the lock isn't
	 * held here, but syncing the srcu with the mutex held
	 * could deadlock. Grab the mutex so that the list will be
	 * unmodified. The user we get will exist as long as the
	 * lock is held. Signal that the qddev is going away, and
	 * grab a reference to the user so they don't go away for
	 * synchronize_srcu(). Then release the mutex to avoid
	 * deadlock and make sure the user has observed the signal.
	 * With the lock released, we cannot maintain any state of the
	 * user list.
	 */
	mutex_lock(&qddev->users_mutex);
	while (!list_empty(&qddev->users)) {
		usr = list_first_entry(&qddev->users, struct qaic_user, node);
		list_del_init(&usr->node);
		kref_get(&usr->ref_count);
		usr->qddev = NULL;
		mutex_unlock(&qddev->users_mutex);
		synchronize_srcu(&usr->qddev_lock);
		kref_put(&usr->ref_count, free_usr);
		mutex_lock(&qddev->users_mutex);
	}
	mutex_unlock(&qddev->users_mutex);
	drm_dev_put(drm);
}

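/*
 * MHI probe for the QAIC_CONTROL channel. The channel coming up is the signal
 * that the device firmware has booted, so verify the control protocol version
 * and only then expose the accel node to userspace.
 */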
static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	u16 major = -1, minor = -1;
	struct qaic_device *qdev;
	int ret;

	/*
	 * Invoking this function indicates that the control channel to the
	 * device is available. We use that as a signal to indicate that
	 * the device side firmware has booted. The device side firmware
	 * manages the device resources, so we need to communicate with it
	 * via the control channel in order to utilize the device. Therefore
	 * we wait until this signal to create the drm dev that userspace will
	 * use to control the device, because without the device side firmware,
	 * userspace can't do anything useful.
	 */

	qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));

	qdev->in_reset = false;

	dev_set_drvdata(&mhi_dev->dev, qdev);
	qdev->cntl_ch = mhi_dev;

	ret = qaic_control_open(qdev);
	if (ret) {
		pci_dbg(qdev->pdev, "%s: control_open failed %d\n", __func__, ret);
		return ret;
	}

	ret = get_cntl_version(qdev, NULL, &major, &minor);
	if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
		pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
			__func__, major, minor, CNTL_MAJOR, CNTL_MINOR, ret);
		ret = -EINVAL;
		goto close_control;
	}

	ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);

	return ret;

close_control:
	qaic_control_close(qdev);
	return ret;
}

static void qaic_mhi_remove(struct mhi_device *mhi_dev)
{
	/* This is redundant since we have already observed the device crash */
}

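/*
 * Mark the device as being in reset and wake every pending waiter so nothing
 * sits in a timeout; the SRCU sync ensures all in-flight readers have observed
 * in_reset before the caller starts tearing state down.
 */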
static void qaic_notify_reset(struct qaic_device *qdev)
{
	int i;

	qdev->in_reset = true;
	/* wake up any waiters to avoid waiting for timeouts at sync */
	wake_all_cntl(qdev);
	for (i = 0; i < qdev->num_dbc; ++i)
		wakeup_dbc(qdev, i);
	synchronize_srcu(&qdev->dev_lock);
}

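/*
 * Tear down all host-side state that referenced the device: unregister the
 * accel node, then release every DMA bridge channel. Passing exit_reset
 * clears in_reset so the device can be used again once it comes back.
 */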
void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset)
{
	int i;

	qaic_notify_reset(qdev);

	/* remove drmdevs to prevent new users from coming in */
	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);

	/* start tearing things down */
	for (i = 0; i < qdev->num_dbc; ++i)
		release_dbc(qdev, i);

	if (exit_reset)
		qdev->in_reset = false;
}

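/* Undo create_qdev(): drop per-DBC and device SRCU state, drvdata, and the control workqueue */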
static void cleanup_qdev(struct qaic_device *qdev)
{
	int i;

	for (i = 0; i < qdev->num_dbc; ++i)
		cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
	cleanup_srcu_struct(&qdev->dev_lock);
	pci_set_drvdata(qdev->pdev, NULL);
	destroy_workqueue(qdev->cntl_wq);
}

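/*
 * Allocate and initialize the per-device state: the DMA bridge channel array
 * (16 channels for AIC100), the control workqueue, locks, and the DRM/accel
 * device that qaic_create_drm_device() registers later.
 */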
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	int i;

	qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
	if (!qdev)
		return NULL;

	if (id->device == PCI_DEV_AIC100) {
		qdev->num_dbc = 16;
		qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
		if (!qdev->dbc)
			return NULL;
	}

	qdev->cntl_wq = alloc_workqueue("qaic_cntl", WQ_UNBOUND, 0);
	if (!qdev->cntl_wq)
		return NULL;

	pci_set_drvdata(pdev, qdev);
	qdev->pdev = pdev;

	mutex_init(&qdev->cntl_mutex);
	INIT_LIST_HEAD(&qdev->cntl_xfer_list);
	init_srcu_struct(&qdev->dev_lock);

	for (i = 0; i < qdev->num_dbc; ++i) {
		spin_lock_init(&qdev->dbc[i].xfer_lock);
		qdev->dbc[i].qdev = qdev;
		qdev->dbc[i].id = i;
		INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
		init_srcu_struct(&qdev->dbc[i].ch_lock);
		init_waitqueue_head(&qdev->dbc[i].dbc_release);
		INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
	}

	qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
	if (IS_ERR(qddev)) {
		cleanup_qdev(qdev);
		return NULL;
	}

	drmm_mutex_init(to_drm(qddev), &qddev->users_mutex);
	INIT_LIST_HEAD(&qddev->users);
	qddev->qdev = qdev;
	qdev->qddev = qddev;

	return qdev;
}

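/*
 * Enable the PCI device, set up 64-bit DMA, and map the MMIO the driver uses:
 * BAR 0 (MHI registers) and BAR 2 (DMA bridge). The device is expected to
 * expose BARs 0, 2, and 4.
 */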
static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
{
	int bars;
	int ret;

	bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* make sure the device has the expected BARs */
	if (bars != (BIT(0) | BIT(2) | BIT(4))) {
		pci_dbg(pdev, "%s: expected BARs 0, 2, and 4 not found in device. Found 0x%x\n",
			__func__, bars);
		return -EINVAL;
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;
	ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	if (ret)
		return ret;

	qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
	if (IS_ERR(qdev->bar_0))
		return PTR_ERR(qdev->bar_0);

	qdev->bar_2 = devm_ioremap_resource(&pdev->dev, &pdev->resource[2]);
	if (IS_ERR(qdev->bar_2))
		return PTR_ERR(qdev->bar_2);

	/* Managed release since we use pcim_enable_device above */
	pci_set_master(pdev);

	return 0;
}

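/*
 * Allocate the 32 MSI vectors the device expects. Vector 0 is handed to the
 * MHI controller (the return value), and vectors 1..num_dbc serve the DMA
 * bridge channels. In datapath polling mode the DBC interrupts are left
 * disabled and a poll worker is used instead.
 */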
static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
{
	int mhi_irq;
	int ret;
	int i;

	/* Managed release since we use pcim_enable_device */
	ret = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
	if (ret < 0)
		return ret;

	if (ret < 32) {
		pci_err(pdev, "%s: Requested 32 MSIs. Obtained %d MSIs which is less than the 32 required.\n",
			__func__, ret);
		return -ENODEV;
	}

	mhi_irq = pci_irq_vector(pdev, 0);
	if (mhi_irq < 0)
		return mhi_irq;

	for (i = 0; i < qdev->num_dbc; ++i) {
		ret = devm_request_threaded_irq(&pdev->dev, pci_irq_vector(pdev, i + 1),
						dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED,
						"qaic_dbc", &qdev->dbc[i]);
		if (ret)
			return ret;

		if (datapath_polling) {
			qdev->dbc[i].irq = pci_irq_vector(pdev, i + 1);
			disable_irq_nosync(qdev->dbc[i].irq);
			INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
		}
	}

	return mhi_irq;
}

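/*
 * PCI probe: build the qaic_device, bring up PCI/MSI, and register the MHI
 * controller. The accel node is not created here - that happens in
 * qaic_mhi_probe() once the firmware's control channel is up.
 */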
static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qaic_device *qdev;
	int mhi_irq;
	int ret;
	int i;

	qdev = create_qdev(pdev, id);
	if (!qdev)
		return -ENOMEM;

	ret = init_pci(qdev, pdev);
	if (ret)
		goto cleanup_qdev;

	for (i = 0; i < qdev->num_dbc; ++i)
		qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);

	mhi_irq = init_msi(qdev, pdev);
	if (mhi_irq < 0) {
		ret = mhi_irq;
		goto cleanup_qdev;
	}

	qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq);
	if (IS_ERR(qdev->mhi_cntrl)) {
		ret = PTR_ERR(qdev->mhi_cntrl);
		goto cleanup_qdev;
	}

	return 0;

cleanup_qdev:
	cleanup_qdev(qdev);
	return ret;
}

static void qaic_pci_remove(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	if (!qdev)
		return;

	qaic_dev_reset_clean_local_state(qdev, false);
	qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
	cleanup_qdev(qdev);
}

static void qaic_pci_shutdown(struct pci_dev *pdev)
{
	/* see qaic_exit for what link_up is doing */
	link_up = true;
	qaic_pci_remove(pdev);
}

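/* PCI error/reset hooks: request a reset on error, quiesce before the reset, resume MHI after */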
static pci_ers_result_t qaic_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t error)
{
	return PCI_ERS_RESULT_NEED_RESET;
}

static void qaic_pci_reset_prepare(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qaic_notify_reset(qdev);
	qaic_mhi_start_reset(qdev->mhi_cntrl);
	qaic_dev_reset_clean_local_state(qdev, false);
}

static void qaic_pci_reset_done(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qdev->in_reset = false;
	qaic_mhi_reset_done(qdev->mhi_cntrl);
}

static const struct mhi_device_id qaic_mhi_match_table[] = {
	{ .chan = "QAIC_CONTROL", },
	{},
};

static struct mhi_driver qaic_mhi_driver = {
	.id_table = qaic_mhi_match_table,
	.remove = qaic_mhi_remove,
	.probe = qaic_mhi_probe,
	.ul_xfer_cb = qaic_mhi_ul_xfer_cb,
	.dl_xfer_cb = qaic_mhi_dl_xfer_cb,
	.driver = {
		.name = "qaic_mhi",
	},
};

static const struct pci_device_id qaic_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
	{ }
};
MODULE_DEVICE_TABLE(pci, qaic_ids);

static const struct pci_error_handlers qaic_pci_err_handler = {
	.error_detected = qaic_pci_error_detected,
	.reset_prepare = qaic_pci_reset_prepare,
	.reset_done = qaic_pci_reset_done,
};

static struct pci_driver qaic_pci_driver = {
	.name = QAIC_NAME,
	.id_table = qaic_ids,
	.probe = qaic_pci_probe,
	.remove = qaic_pci_remove,
	.shutdown = qaic_pci_shutdown,
	.err_handler = &qaic_pci_err_handler,
};

static int __init qaic_init(void)
{
	int ret;

	ret = pci_register_driver(&qaic_pci_driver);
	if (ret) {
		pr_debug("qaic: pci_register_driver failed %d\n", ret);
		return ret;
	}

	ret = mhi_driver_register(&qaic_mhi_driver);
	if (ret) {
		pr_debug("qaic: mhi_driver_register failed %d\n", ret);
		goto free_pci;
	}

	return 0;

free_pci:
	pci_unregister_driver(&qaic_pci_driver);
	return ret;
}

static void __exit qaic_exit(void)
{
	/*
	 * We assume that qaic_pci_remove() is called due to a hotplug event
	 * which would mean that the link is down, and thus
	 * qaic_mhi_free_controller() should not try to access the device during
	 * cleanup.
	 * We call pci_unregister_driver() below, which also triggers
	 * qaic_pci_remove(), but since this is module exit, we expect the link
	 * to the device to be up, in which case qaic_mhi_free_controller()
	 * should try to access the device during cleanup to put the device in
	 * a sane state.
	 * For that reason, we set link_up here to let qaic_mhi_free_controller
	 * know the expected link state. Since the module is going to be
	 * removed at the end of this, we don't need to worry about
	 * reinitializing the link_up state after the cleanup is done.
	 */
	link_up = true;
	mhi_driver_unregister(&qaic_mhi_driver);
	pci_unregister_driver(&qaic_pci_driver);
}

module_init(qaic_init);
module_exit(qaic_exit);

MODULE_AUTHOR(QAIC_DESC " Kernel Driver Team");
MODULE_DESCRIPTION(QAIC_DESC " Accel Driver");
MODULE_LICENSE("GPL");