xref: /linux/drivers/pci/switch/switchtec.c (revision f86fd32d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Microsemi Switchtec(tm) PCIe Management Driver
4  * Copyright (c) 2017, Microsemi Corporation
5  */
6 
7 #include <linux/switchtec.h>
8 #include <linux/switchtec_ioctl.h>
9 
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
12 #include <linux/fs.h>
13 #include <linux/uaccess.h>
14 #include <linux/poll.h>
15 #include <linux/wait.h>
16 #include <linux/io-64-nonatomic-lo-hi.h>
17 #include <linux/nospec.h>
18 
19 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
20 MODULE_VERSION("0.1");
21 MODULE_LICENSE("GPL");
22 MODULE_AUTHOR("Microsemi Corporation");
23 
24 static int max_devices = 16;
25 module_param(max_devices, int, 0644);
26 MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
27 
28 static bool use_dma_mrpc = 1;
29 module_param(use_dma_mrpc, bool, 0644);
30 MODULE_PARM_DESC(use_dma_mrpc,
31 		 "Enable the use of the DMA MRPC feature");
32 
33 static int nirqs = 32;
34 module_param(nirqs, int, 0644);
35 MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)");
36 
37 static dev_t switchtec_devt;
38 static DEFINE_IDA(switchtec_minor_ida);
39 
40 struct class *switchtec_class;
41 EXPORT_SYMBOL_GPL(switchtec_class);
42 
/*
 * Lifecycle of one user MRPC request; transitions happen under
 * stdev->mrpc_mutex (see stuser_set_state()).
 */
enum mrpc_state {
	MRPC_IDLE = 0,		/* no command outstanding for this user */
	MRPC_QUEUED,		/* queued on stdev->mrpc_queue, not yet submitted */
	MRPC_RUNNING,		/* submitted to the hardware */
	MRPC_DONE,		/* completed; status/return_code/data are valid */
};
49 
/*
 * Per-open-file context for the switchtec char device.  Each stuser
 * carries at most one MRPC request at a time and sits on
 * stdev->mrpc_queue while that request is queued or running.
 */
struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;	/* protected by stdev->mrpc_mutex */

	struct completion comp;	/* completed when the command finishes */
	struct kref kref;
	struct list_head list;	/* entry in stdev->mrpc_queue */

	u32 cmd;		/* MRPC command word copied from userspace */
	u32 status;		/* firmware status word at completion */
	u32 return_code;	/* firmware return code (valid when DONE) */
	size_t data_len;	/* bytes of input payload in @data */
	size_t read_len;	/* bytes of output payload to copy back */
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;		/* snapshot of stdev->event_cnt for poll() */
};
67 
68 static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
69 {
70 	struct switchtec_user *stuser;
71 
72 	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
73 	if (!stuser)
74 		return ERR_PTR(-ENOMEM);
75 
76 	get_device(&stdev->dev);
77 	stuser->stdev = stdev;
78 	kref_init(&stuser->kref);
79 	INIT_LIST_HEAD(&stuser->list);
80 	init_completion(&stuser->comp);
81 	stuser->event_cnt = atomic_read(&stdev->event_cnt);
82 
83 	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
84 
85 	return stuser;
86 }
87 
88 static void stuser_free(struct kref *kref)
89 {
90 	struct switchtec_user *stuser;
91 
92 	stuser = container_of(kref, struct switchtec_user, kref);
93 
94 	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);
95 
96 	put_device(&stuser->stdev->dev);
97 	kfree(stuser);
98 }
99 
/* Drop a reference on @stuser; stuser_free() runs on the last put */
static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}
104 
105 static void stuser_set_state(struct switchtec_user *stuser,
106 			     enum mrpc_state state)
107 {
108 	/* requires the mrpc_mutex to already be held when called */
109 
110 	const char * const state_names[] = {
111 		[MRPC_IDLE] = "IDLE",
112 		[MRPC_QUEUED] = "QUEUED",
113 		[MRPC_RUNNING] = "RUNNING",
114 		[MRPC_DONE] = "DONE",
115 	};
116 
117 	stuser->state = state;
118 
119 	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
120 		stuser, state_names[state]);
121 }
122 
123 static void mrpc_complete_cmd(struct switchtec_dev *stdev);
124 
/*
 * Flush the posted-write (write-combining) buffer by performing a read
 * from a register that is safe to read at any time.
 */
static void flush_wc_buf(struct switchtec_dev *stdev)
{
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;

	/*
	 * odb (outbound doorbell) register is processed by low latency
	 * hardware and w/o side effect
	 */
	mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
	ioread32(&mmio_dbmsg->odb);
}
137 
/*
 * Submit the command at the head of the MRPC queue to the hardware.
 *
 * No-op when a command is already in flight or the queue is empty.
 * The input payload is written before the command register because
 * writing the cmd register is what triggers execution; flush_wc_buf()
 * in between forces the payload out of any posted-write buffer first.
 */
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc) {
		/* reset the DMA completion area before kicking the command */
		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
		memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
	}

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	flush_wc_buf(stdev);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	/* poll for completion in case the completion interrupt is lost */
	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
168 
169 static int mrpc_queue_cmd(struct switchtec_user *stuser)
170 {
171 	/* requires the mrpc_mutex to already be held when called */
172 
173 	struct switchtec_dev *stdev = stuser->stdev;
174 
175 	kref_get(&stuser->kref);
176 	stuser->read_len = sizeof(stuser->data);
177 	stuser_set_state(stuser, MRPC_QUEUED);
178 	init_completion(&stuser->comp);
179 	list_add_tail(&stuser->list, &stdev->mrpc_queue);
180 
181 	mrpc_cmd_submit(stdev);
182 
183 	return 0;
184 }
185 
/*
 * Harvest the result of the currently running MRPC command.
 *
 * Reads the status (from the DMA completion area when DMA MRPC is in
 * use, otherwise from MMIO) and, on success, the return code and
 * output payload.  Completes the waiter, releases the queue reference
 * taken by mrpc_queue_cmd(), and submits the next queued command.
 * Returns early without touching anything if the command is still in
 * progress.
 */
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc)
		stuser->status = stdev->dma_mrpc->status;
	else
		stuser->status = ioread32(&stdev->mmio_mrpc->status);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	/* only fetch return code and data for commands that completed */
	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	if (stdev->dma_mrpc)
		stuser->return_code = stdev->dma_mrpc->rtn_code;
	else
		stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	if (stdev->dma_mrpc)
		memcpy(stuser->data, &stdev->dma_mrpc->data,
			      stuser->read_len);
	else
		memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
			      stuser->read_len);
out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);	/* drops mrpc_queue_cmd()'s reference */
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
232 
/*
 * Work item run from stdev->mrpc_work (presumably scheduled from the
 * MRPC completion notification path — scheduling site not in view):
 * cancel the polling timeout and harvest the completed command.
 */
static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}
246 
/*
 * Fallback poll for MRPC completion, armed by mrpc_cmd_submit().
 *
 * If the command is still in progress the work re-arms itself for
 * another 500ms; otherwise the completion is harvested here (covers
 * the case where the completion notification never arrives).
 */
static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	if (stdev->dma_mrpc)
		status = stdev->dma_mrpc->status;
	else
		status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);
out:
	mutex_unlock(&stdev->mrpc_mutex);
}
272 
273 static ssize_t device_version_show(struct device *dev,
274 	struct device_attribute *attr, char *buf)
275 {
276 	struct switchtec_dev *stdev = to_stdev(dev);
277 	u32 ver;
278 
279 	ver = ioread32(&stdev->mmio_sys_info->device_version);
280 
281 	return sprintf(buf, "%x\n", ver);
282 }
283 static DEVICE_ATTR_RO(device_version);
284 
285 static ssize_t fw_version_show(struct device *dev,
286 	struct device_attribute *attr, char *buf)
287 {
288 	struct switchtec_dev *stdev = to_stdev(dev);
289 	u32 ver;
290 
291 	ver = ioread32(&stdev->mmio_sys_info->firmware_version);
292 
293 	return sprintf(buf, "%08x\n", ver);
294 }
295 static DEVICE_ATTR_RO(fw_version);
296 
297 static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
298 {
299 	int i;
300 
301 	memcpy_fromio(buf, attr, len);
302 	buf[len] = '\n';
303 	buf[len + 1] = 0;
304 
305 	for (i = len - 1; i > 0; i--) {
306 		if (buf[i] != ' ')
307 			break;
308 		buf[i] = '\n';
309 		buf[i + 1] = 0;
310 	}
311 
312 	return strlen(buf);
313 }
314 
/*
 * Generate a read-only sysfs attribute for a string field that lives at
 * a different offset in the gen3 vs gen4 sys_info register layouts.
 * Returns -ENOTSUPP for unrecognized hardware generations.
 */
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
	if (stdev->gen == SWITCHTEC_GEN3) \
		return io_string_show(buf, &si->gen3.field, \
				      sizeof(si->gen3.field)); \
	else if (stdev->gen == SWITCHTEC_GEN4) \
		return io_string_show(buf, &si->gen4.field, \
				      sizeof(si->gen4.field)); \
	else \
		return -ENOTSUPP; \
} \
\
static DEVICE_ATTR_RO(field)

/* string attributes present in both gen3 and gen4 layouts */
DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
336 
337 static ssize_t component_vendor_show(struct device *dev,
338 				     struct device_attribute *attr, char *buf)
339 {
340 	struct switchtec_dev *stdev = to_stdev(dev);
341 	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
342 
343 	/* component_vendor field not supported after gen3 */
344 	if (stdev->gen != SWITCHTEC_GEN3)
345 		return sprintf(buf, "none\n");
346 
347 	return io_string_show(buf, &si->gen3.component_vendor,
348 			      sizeof(si->gen3.component_vendor));
349 }
350 static DEVICE_ATTR_RO(component_vendor);
351 
352 static ssize_t component_id_show(struct device *dev,
353 	struct device_attribute *attr, char *buf)
354 {
355 	struct switchtec_dev *stdev = to_stdev(dev);
356 	int id = ioread16(&stdev->mmio_sys_info->gen3.component_id);
357 
358 	/* component_id field not supported after gen3 */
359 	if (stdev->gen != SWITCHTEC_GEN3)
360 		return sprintf(buf, "none\n");
361 
362 	return sprintf(buf, "PM%04X\n", id);
363 }
364 static DEVICE_ATTR_RO(component_id);
365 
366 static ssize_t component_revision_show(struct device *dev,
367 	struct device_attribute *attr, char *buf)
368 {
369 	struct switchtec_dev *stdev = to_stdev(dev);
370 	int rev = ioread8(&stdev->mmio_sys_info->gen3.component_revision);
371 
372 	/* component_revision field not supported after gen3 */
373 	if (stdev->gen != SWITCHTEC_GEN3)
374 		return sprintf(buf, "255\n");
375 
376 	return sprintf(buf, "%d\n", rev);
377 }
378 static DEVICE_ATTR_RO(component_revision);
379 
380 static ssize_t partition_show(struct device *dev,
381 	struct device_attribute *attr, char *buf)
382 {
383 	struct switchtec_dev *stdev = to_stdev(dev);
384 
385 	return sprintf(buf, "%d\n", stdev->partition);
386 }
387 static DEVICE_ATTR_RO(partition);
388 
389 static ssize_t partition_count_show(struct device *dev,
390 	struct device_attribute *attr, char *buf)
391 {
392 	struct switchtec_dev *stdev = to_stdev(dev);
393 
394 	return sprintf(buf, "%d\n", stdev->partition_count);
395 }
396 static DEVICE_ATTR_RO(partition_count);
397 
/* sysfs attributes attached to every switchtec device */
static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
413 
414 static int switchtec_dev_open(struct inode *inode, struct file *filp)
415 {
416 	struct switchtec_dev *stdev;
417 	struct switchtec_user *stuser;
418 
419 	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);
420 
421 	stuser = stuser_create(stdev);
422 	if (IS_ERR(stuser))
423 		return PTR_ERR(stuser);
424 
425 	filp->private_data = stuser;
426 	stream_open(inode, filp);
427 
428 	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
429 
430 	return 0;
431 }
432 
/*
 * release(): drop the file's reference; the context is freed once any
 * in-flight command on the mrpc_queue has also dropped its reference.
 */
static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}
441 
442 static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
443 {
444 	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
445 		return -EINTR;
446 
447 	if (!stdev->alive) {
448 		mutex_unlock(&stdev->mrpc_mutex);
449 		return -ENODEV;
450 	}
451 
452 	return 0;
453 }
454 
/*
 * write(): submit an MRPC command.
 *
 * The userspace buffer is a 32-bit command word followed by up to
 * SWITCHTEC_MRPC_PAYLOAD_SIZE bytes of input data.  Only one command
 * may be outstanding per open file (-EBADE otherwise).  GAS read/write
 * commands are restricted to CAP_SYS_ADMIN.
 */
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}
	/* raw GAS access can reconfigure the switch; require admin */
	if (((MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_WRITE) ||
	     (MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_READ)) &&
	    !capable(CAP_SYS_ADMIN)) {
		rc = -EPERM;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}
506 
507 static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
508 				  size_t size, loff_t *off)
509 {
510 	struct switchtec_user *stuser = filp->private_data;
511 	struct switchtec_dev *stdev = stuser->stdev;
512 	int rc;
513 
514 	if (size < sizeof(stuser->cmd) ||
515 	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
516 		return -EINVAL;
517 
518 	rc = lock_mutex_and_test_alive(stdev);
519 	if (rc)
520 		return rc;
521 
522 	if (stuser->state == MRPC_IDLE) {
523 		mutex_unlock(&stdev->mrpc_mutex);
524 		return -EBADE;
525 	}
526 
527 	stuser->read_len = size - sizeof(stuser->return_code);
528 
529 	mutex_unlock(&stdev->mrpc_mutex);
530 
531 	if (filp->f_flags & O_NONBLOCK) {
532 		if (!try_wait_for_completion(&stuser->comp))
533 			return -EAGAIN;
534 	} else {
535 		rc = wait_for_completion_interruptible(&stuser->comp);
536 		if (rc < 0)
537 			return rc;
538 	}
539 
540 	rc = lock_mutex_and_test_alive(stdev);
541 	if (rc)
542 		return rc;
543 
544 	if (stuser->state != MRPC_DONE) {
545 		mutex_unlock(&stdev->mrpc_mutex);
546 		return -EBADE;
547 	}
548 
549 	rc = copy_to_user(data, &stuser->return_code,
550 			  sizeof(stuser->return_code));
551 	if (rc) {
552 		rc = -EFAULT;
553 		goto out;
554 	}
555 
556 	data += sizeof(stuser->return_code);
557 	rc = copy_to_user(data, &stuser->data,
558 			  size - sizeof(stuser->return_code));
559 	if (rc) {
560 		rc = -EFAULT;
561 		goto out;
562 	}
563 
564 	stuser_set_state(stuser, MRPC_IDLE);
565 
566 out:
567 	mutex_unlock(&stdev->mrpc_mutex);
568 
569 	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
570 		return size;
571 	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
572 		return -ENXIO;
573 	else
574 		return -EBADMSG;
575 }
576 
/*
 * poll(): readable when this file's MRPC command has completed,
 * priority-readable when new switch events have occurred since the
 * last event summary was read.  A dead device reports every condition
 * so waiters wake up and notice.
 */
static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (try_wait_for_completion(&stuser->comp))
		ret |= EPOLLIN | EPOLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI | EPOLLRDBAND;

	return ret;
}
599 
600 static int ioctl_flash_info(struct switchtec_dev *stdev,
601 			    struct switchtec_ioctl_flash_info __user *uinfo)
602 {
603 	struct switchtec_ioctl_flash_info info = {0};
604 	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
605 
606 	if (stdev->gen == SWITCHTEC_GEN3) {
607 		info.flash_length = ioread32(&fi->gen3.flash_length);
608 		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
609 	} else if (stdev->gen == SWITCHTEC_GEN4) {
610 		info.flash_length = ioread32(&fi->gen4.flash_length);
611 		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
612 	} else {
613 		return -ENOTSUPP;
614 	}
615 
616 	if (copy_to_user(uinfo, &info, sizeof(info)))
617 		return -EFAULT;
618 
619 	return 0;
620 }
621 
/* Copy a partition's flash address and length out of its info registers */
static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}
628 
/*
 * Fill in address, length, and active/running flags for one gen3 flash
 * partition selected by info->flash_partition.
 *
 * PART_RUNNING is set when the firmware reports it is executing from
 * that partition; PART_ACTIVE is set when the partition's flash address
 * matches the active_cfg/active_img register (compared at the end).
 */
static int flash_part_info_gen3(struct switchtec_dev *stdev,
		struct switchtec_ioctl_flash_part_info *info)
{
	struct flash_info_regs_gen3 __iomem *fi =
		&stdev->mmio_flash_info->gen3;
	struct sys_info_regs_gen3 __iomem *si = &stdev->mmio_sys_info->gen3;
	u32 active_addr = -1;	/* matches no partition unless overwritten */

	switch (info->flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	/* on gen3 the active partition is identified by flash address */
	if (info->address == active_addr)
		info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	return 0;
}
698 
/*
 * Fill in address, length, and active/running flags for one gen4 flash
 * partition selected by info->flash_partition.
 *
 * Unlike gen3, gen4 exposes explicit per-class active flags
 * (fi->active_flag), so PART_ACTIVE is decided per case rather than by
 * comparing flash addresses.
 */
static int flash_part_info_gen4(struct switchtec_dev *stdev,
		struct switchtec_ioctl_flash_part_info *info)
{
	struct flash_info_regs_gen4 __iomem *fi = &stdev->mmio_flash_info->gen4;
	struct sys_info_regs_gen4 __iomem *si = &stdev->mmio_sys_info->gen4;
	struct active_partition_info_gen4 __iomem *af = &fi->active_flag;

	switch (info->flash_partition) {
	case SWITCHTEC_IOCTL_PART_MAP_0:
		set_fw_info_part(info, &fi->map0);
		break;
	case SWITCHTEC_IOCTL_PART_MAP_1:
		set_fw_info_part(info, &fi->map1);
		break;
	case SWITCHTEC_IOCTL_PART_KEY_0:
		set_fw_info_part(info, &fi->key0);
		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_KEY_1:
		set_fw_info_part(info, &fi->key1);
		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_BL2_0:
		set_fw_info_part(info, &fi->bl2_0);
		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_BL2_1:
		set_fw_info_part(info, &fi->bl2_1);
		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG0:
		set_fw_info_part(info, &fi->cfg0);
		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		set_fw_info_part(info, &fi->cfg1);
		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		set_fw_info_part(info, &fi->img0);
		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		set_fw_info_part(info, &fi->img1);
		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
802 
803 static int ioctl_flash_part_info(struct switchtec_dev *stdev,
804 		struct switchtec_ioctl_flash_part_info __user *uinfo)
805 {
806 	int ret;
807 	struct switchtec_ioctl_flash_part_info info = {0};
808 
809 	if (copy_from_user(&info, uinfo, sizeof(info)))
810 		return -EFAULT;
811 
812 	if (stdev->gen == SWITCHTEC_GEN3) {
813 		ret = flash_part_info_gen3(stdev, &info);
814 		if (ret)
815 			return ret;
816 	} else if (stdev->gen == SWITCHTEC_GEN4) {
817 		ret = flash_part_info_gen4(stdev, &info);
818 		if (ret)
819 			return ret;
820 	} else {
821 		return -ENOTSUPP;
822 	}
823 
824 	if (copy_to_user(uinfo, &info, sizeof(info)))
825 		return -EFAULT;
826 
827 	return 0;
828 }
829 
/*
 * ioctl: snapshot all event summary registers (global, per-partition,
 * per-PFF) into a heap buffer and copy @size bytes of it to userspace.
 * Also records the current event count so poll() stops signalling
 * EPOLLPRI until new events arrive.  @size is caller-supplied; the
 * ioctl dispatcher (not in view) is assumed to bound it by sizeof(*s).
 */
static int ioctl_event_summary(struct switchtec_dev *stdev,
	struct switchtec_user *stuser,
	struct switchtec_ioctl_event_summary __user *usum,
	size_t size)
{
	struct switchtec_ioctl_event_summary *s;
	int i;
	u32 reg;
	int ret = 0;

	/* heap-allocated: the summary struct is too large for the stack */
	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->global = ioread32(&stdev->mmio_sw_event->global_summary);
	s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
	s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s->part[i] = reg;
	}

	for (i = 0; i < stdev->pff_csr_count; i++) {
		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s->pff[i] = reg;
	}

	if (copy_to_user(usum, s, size)) {
		ret = -EFAULT;
		goto error_case;
	}

	stuser->event_cnt = atomic_read(&stdev->event_cnt);

error_case:
	kfree(s);
	return ret;
}
869 
/*
 * The three map_reg callbacks below resolve an (offset, index) pair
 * from the event_regs[] table into the MMIO address of an event header
 * in one of the three register spaces.
 */

/* Global events: @index is ignored; offset is from the sw_event block */
static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

/* Partition events: @index selects the partition config block */
static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

/* PFF events: @index selects the PFF CSR block */
static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}
887 
/*
 * Designated-initializer helpers: each maps an ioctl event id to the
 * offset of its header register within the relevant register struct
 * plus the callback that resolves the register space.
 */
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

/* Table indexed by SWITCHTEC_IOCTL_EVENT_* id; see event_hdr_addr() */
static const struct event_reg {
	size_t offset;
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY,
	       intercomm_notify_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_UEC, uec_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};
933 
934 static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
935 				   int event_id, int index)
936 {
937 	size_t off;
938 
939 	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
940 		return ERR_PTR(-EINVAL);
941 
942 	off = event_regs[event_id].offset;
943 
944 	if (event_regs[event_id].map_reg == part_ev_reg) {
945 		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
946 			index = stdev->partition;
947 		else if (index < 0 || index >= stdev->partition_count)
948 			return ERR_PTR(-EINVAL);
949 	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
950 		if (index < 0 || index >= stdev->pff_csr_count)
951 			return ERR_PTR(-EINVAL);
952 	}
953 
954 	return event_regs[event_id].map_reg(stdev, off, index);
955 }
956 
/*
 * Read one event's header and data words, then apply the requested
 * flag changes (clear, enable/disable IRQ/log/CLI/fatal) by rewriting
 * the header.  On return, ctl->flags reports the resulting enable
 * state, ctl->occurred/count the event status, and ctl->data the raw
 * event data.
 */
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	/* the data words immediately follow the header register */
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	/*
	 * Writing the header with the CLEAR bit set acknowledges the
	 * event; mask it out unless the caller asked for a clear.
	 */
	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	/* only touch the hardware when the caller requested a change */
	if (ctl->flags)
		iowrite32(hdr, reg);

	/* report the resulting enable state back to the caller */
	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}
1009 
/*
 * ioctl: query or modify one event's control flags.  With
 * SWITCHTEC_IOCTL_EVENT_IDX_ALL, the same flag change is applied to
 * every index of the event's register space (1 for global events, the
 * partition count or PFF count otherwise); the result copied back then
 * reflects the last index processed.
 */
static int ioctl_event_ctl(struct switchtec_dev *stdev,
	struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	unsigned int event_flags;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		/* event_ctl() rewrites ctl.flags; reapply per index */
		event_flags = ctl.flags;
		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ctl.flags = event_flags;
			ret = event_ctl(stdev, &ctl);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}
1055 
1056 static int ioctl_pff_to_port(struct switchtec_dev *stdev,
1057 			     struct switchtec_ioctl_pff_port *up)
1058 {
1059 	int i, part;
1060 	u32 reg;
1061 	struct part_cfg_regs *pcfg;
1062 	struct switchtec_ioctl_pff_port p;
1063 
1064 	if (copy_from_user(&p, up, sizeof(p)))
1065 		return -EFAULT;
1066 
1067 	p.port = -1;
1068 	for (part = 0; part < stdev->partition_count; part++) {
1069 		pcfg = &stdev->mmio_part_cfg_all[part];
1070 		p.partition = part;
1071 
1072 		reg = ioread32(&pcfg->usp_pff_inst_id);
1073 		if (reg == p.pff) {
1074 			p.port = 0;
1075 			break;
1076 		}
1077 
1078 		reg = ioread32(&pcfg->vep_pff_inst_id);
1079 		if (reg == p.pff) {
1080 			p.port = SWITCHTEC_IOCTL_PFF_VEP;
1081 			break;
1082 		}
1083 
1084 		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
1085 			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
1086 			if (reg != p.pff)
1087 				continue;
1088 
1089 			p.port = i + 1;
1090 			break;
1091 		}
1092 
1093 		if (p.port != -1)
1094 			break;
1095 	}
1096 
1097 	if (copy_to_user(up, &p, sizeof(p)))
1098 		return -EFAULT;
1099 
1100 	return 0;
1101 }
1102 
1103 static int ioctl_port_to_pff(struct switchtec_dev *stdev,
1104 			     struct switchtec_ioctl_pff_port *up)
1105 {
1106 	struct switchtec_ioctl_pff_port p;
1107 	struct part_cfg_regs *pcfg;
1108 
1109 	if (copy_from_user(&p, up, sizeof(p)))
1110 		return -EFAULT;
1111 
1112 	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
1113 		pcfg = stdev->mmio_part_cfg;
1114 	else if (p.partition < stdev->partition_count)
1115 		pcfg = &stdev->mmio_part_cfg_all[p.partition];
1116 	else
1117 		return -EINVAL;
1118 
1119 	switch (p.port) {
1120 	case 0:
1121 		p.pff = ioread32(&pcfg->usp_pff_inst_id);
1122 		break;
1123 	case SWITCHTEC_IOCTL_PFF_VEP:
1124 		p.pff = ioread32(&pcfg->vep_pff_inst_id);
1125 		break;
1126 	default:
1127 		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
1128 			return -EINVAL;
1129 		p.port = array_index_nospec(p.port,
1130 					ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
1131 		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
1132 		break;
1133 	}
1134 
1135 	if (copy_to_user(up, &p, sizeof(p)))
1136 		return -EFAULT;
1137 
1138 	return 0;
1139 }
1140 
/*
 * Top-level ioctl dispatcher for the management character device.
 *
 * Every command runs with mrpc_mutex held (taken by
 * lock_mutex_and_test_alive(), which also fails early once the device
 * has been marked not alive) and the mutex is dropped on exit.
 */
static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;
	void __user *argp = (void __user *)arg;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	switch (cmd) {
	case SWITCHTEC_IOCTL_FLASH_INFO:
		rc = ioctl_flash_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
		rc = ioctl_flash_part_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
		/* Legacy summary: same handler, smaller user struct */
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary_legacy));
		break;
	case SWITCHTEC_IOCTL_EVENT_CTL:
		rc = ioctl_event_ctl(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PFF_TO_PORT:
		rc = ioctl_pff_to_port(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PORT_TO_PFF:
		rc = ioctl_port_to_pff(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary));
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	mutex_unlock(&stdev->mrpc_mutex);
	return rc;
}
1185 
/* File operations for the /dev/switchtec<N> management device */
static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
1196 
1197 static void link_event_work(struct work_struct *work)
1198 {
1199 	struct switchtec_dev *stdev;
1200 
1201 	stdev = container_of(work, struct switchtec_dev, link_event_work);
1202 
1203 	if (stdev->link_notifier)
1204 		stdev->link_notifier(stdev);
1205 }
1206 
1207 static void check_link_state_events(struct switchtec_dev *stdev)
1208 {
1209 	int idx;
1210 	u32 reg;
1211 	int count;
1212 	int occurred = 0;
1213 
1214 	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1215 		reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
1216 		dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
1217 		count = (reg >> 5) & 0xFF;
1218 
1219 		if (count != stdev->link_event_count[idx]) {
1220 			occurred = 1;
1221 			stdev->link_event_count[idx] = count;
1222 		}
1223 	}
1224 
1225 	if (occurred)
1226 		schedule_work(&stdev->link_event_work);
1227 }
1228 
1229 static void enable_link_state_events(struct switchtec_dev *stdev)
1230 {
1231 	int idx;
1232 
1233 	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1234 		iowrite32(SWITCHTEC_EVENT_CLEAR |
1235 			  SWITCHTEC_EVENT_EN_IRQ,
1236 			  &stdev->mmio_pff_csr[idx].link_state_hdr);
1237 	}
1238 }
1239 
/*
 * Program the DMA MRPC buffer address and enable the feature.  The
 * write-combining buffer is flushed between the address write and the
 * enable write so the device sees the address before the enable bit.
 */
static void enable_dma_mrpc(struct switchtec_dev *stdev)
{
	writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
	flush_wc_buf(stdev);
	iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
}
1246 
/*
 * Device release callback, invoked when the last reference to
 * stdev->dev is dropped: disable DMA MRPC in the hardware (reverse of
 * enable_dma_mrpc()), free its coherent buffer, and free stdev itself.
 *
 * NOTE(review): this path touches MMIO and stdev->pdev from release;
 * if the final put_device() can happen after a surprise hot-remove,
 * these accesses look unsafe — confirm against the driver's hotplug
 * expectations.
 */
static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	if (stdev->dma_mrpc) {
		iowrite32(0, &stdev->mmio_mrpc->dma_en);
		flush_wc_buf(stdev);
		writeq(0, &stdev->mmio_mrpc->dma_addr);
		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
				stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
	}
	kfree(stdev);
}
1260 
/*
 * Mark the device dead and unblock all waiters: stop bus mastering,
 * cancel the MRPC timeout work, complete every queued MRPC request
 * under mrpc_mutex, and wake any pollers on event_wq.  Called from
 * remove and from the probe error path.
 */
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);	/* drop the queue's reference */
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}
1285 
/*
 * Allocate and initialize a switchtec_dev for the given PCI device:
 * MRPC queue/work items, event wait queue, the embedded struct device
 * (named "switchtec<minor>", minor from switchtec_minor_ida) and its
 * cdev.  The device is initialized but not yet added to the system;
 * the caller registers it with cdev_device_add().  On error the
 * partially-initialized object is released via put_device(), which
 * ends up in stdev_release().
 */
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	/* allocate on the PCI device's NUMA node */
	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}
1337 
1338 static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
1339 {
1340 	size_t off = event_regs[eid].offset;
1341 	u32 __iomem *hdr_reg;
1342 	u32 hdr;
1343 
1344 	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
1345 	hdr = ioread32(hdr_reg);
1346 
1347 	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
1348 		return 0;
1349 
1350 	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
1351 	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
1352 	iowrite32(hdr, hdr_reg);
1353 
1354 	return 1;
1355 }
1356 
1357 static int mask_all_events(struct switchtec_dev *stdev, int eid)
1358 {
1359 	int idx;
1360 	int count = 0;
1361 
1362 	if (event_regs[eid].map_reg == part_ev_reg) {
1363 		for (idx = 0; idx < stdev->partition_count; idx++)
1364 			count += mask_event(stdev, eid, idx);
1365 	} else if (event_regs[eid].map_reg == pff_ev_reg) {
1366 		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1367 			if (!stdev->pff_local[idx])
1368 				continue;
1369 
1370 			count += mask_event(stdev, eid, idx);
1371 		}
1372 	} else {
1373 		count += mask_event(stdev, eid, 0);
1374 	}
1375 
1376 	return count;
1377 }
1378 
/*
 * Main event interrupt handler.  Acknowledges an MRPC completion (the
 * actual processing is deferred to mrpc_work), checks for link-state
 * counter changes, then masks every other event instance that has
 * fired so userspace can be notified exactly once via event_wq.
 */
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		/* write the header back to acknowledge the event */
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) {
		/* link state and MRPC completion are handled above */
		if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
		    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
			continue;

		event_count += mask_all_events(stdev, eid);
	}

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}
1414 
1415 
1416 static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
1417 {
1418 	struct switchtec_dev *stdev = dev;
1419 	irqreturn_t ret = IRQ_NONE;
1420 
1421 	iowrite32(SWITCHTEC_EVENT_CLEAR |
1422 		  SWITCHTEC_EVENT_EN_IRQ,
1423 		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
1424 	schedule_work(&stdev->mrpc_work);
1425 
1426 	ret = IRQ_HANDLED;
1427 	return ret;
1428 }
1429 
/*
 * Allocate MSI/MSI-X vectors and request the event interrupt, plus the
 * DMA MRPC completion interrupt when that feature is enabled.  The
 * vector numbers are read from device registers and validated against
 * the number of vectors actually allocated.
 */
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;
	int dma_mrpc_irq;
	int rc;

	/* enforce a floor of 4 vectors on the module parameter */
	if (nirqs < 4)
		nirqs = 4;

	/* PCI_IRQ_VIRTUAL allows vectors beyond what the device exposes */
	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
				      PCI_IRQ_VIRTUAL);
	if (nvecs < 0)
		return nvecs;

	event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	rc = devm_request_irq(&stdev->pdev->dev, event_irq,
				switchtec_event_isr, 0,
				KBUILD_MODNAME, stdev);

	if (rc)
		return rc;

	/* Without DMA MRPC, the event interrupt is all we need */
	if (!stdev->dma_mrpc)
		return rc;

	dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
	if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
		return -EFAULT;

	dma_mrpc_irq  = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
	if (dma_mrpc_irq < 0)
		return dma_mrpc_irq;

	rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
				switchtec_dma_mrpc_isr, 0,
				KBUILD_MODNAME, stdev);

	return rc;
}
1478 
1479 static void init_pff(struct switchtec_dev *stdev)
1480 {
1481 	int i;
1482 	u32 reg;
1483 	struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;
1484 
1485 	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
1486 		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
1487 		if (reg != PCI_VENDOR_ID_MICROSEMI)
1488 			break;
1489 	}
1490 
1491 	stdev->pff_csr_count = i;
1492 
1493 	reg = ioread32(&pcfg->usp_pff_inst_id);
1494 	if (reg < stdev->pff_csr_count)
1495 		stdev->pff_local[reg] = 1;
1496 
1497 	reg = ioread32(&pcfg->vep_pff_inst_id);
1498 	if (reg < stdev->pff_csr_count)
1499 		stdev->pff_local[reg] = 1;
1500 
1501 	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
1502 		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
1503 		if (reg < stdev->pff_csr_count)
1504 			stdev->pff_local[reg] = 1;
1505 	}
1506 }
1507 
/*
 * Enable the PCI device and map BAR 0.  The MRPC region at the start
 * of the BAR is mapped write-combining (devm_ioremap_wc()); the rest
 * of the GAS, from SWITCHTEC_GAS_TOP_CFG_OFFSET up, is mapped with a
 * regular devm_ioremap().  Also reads the partition layout, discovers
 * the PFFs, and allocates the DMA MRPC buffer when the feature is
 * requested and the hardware reports a non-zero DMA version.
 */
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;
	void __iomem *map;
	unsigned long res_start, res_len;
	u32 __iomem *part_id;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	pci_set_master(pdev);

	res_start = pci_resource_start(pdev, 0);
	res_len = pci_resource_len(pdev, 0);

	if (!devm_request_mem_region(&pdev->dev, res_start,
				     res_len, KBUILD_MODNAME))
		return -EBUSY;

	stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
					   SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!stdev->mmio_mrpc)
		return -ENOMEM;

	map = devm_ioremap(&pdev->dev,
			   res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
			   res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!map)
		return -ENOMEM;

	/*
	 * Bias mmio back to the (unmapped) start of the GAS so that the
	 * SWITCHTEC_GAS_*_OFFSET constants below apply uniformly; only
	 * offsets >= SWITCHTEC_GAS_TOP_CFG_OFFSET are dereferenced
	 * through it.
	 */
	stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;

	/* the partition ID register moved between Gen3 and Gen4 */
	if (stdev->gen == SWITCHTEC_GEN3)
		part_id = &stdev->mmio_sys_info->gen3.partition_id;
	else if (stdev->gen == SWITCHTEC_GEN4)
		part_id = &stdev->mmio_sys_info->gen4.partition_id;
	else
		return -ENOTSUPP;

	stdev->partition = ioread8(part_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	if (!use_dma_mrpc)
		return 0;

	/* a zero DMA version register: treat DMA MRPC as unsupported */
	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
		return 0;

	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
					     sizeof(*stdev->dma_mrpc),
					     &stdev->dma_mrpc_dma_addr,
					     GFP_KERNEL);
	if (stdev->dma_mrpc == NULL)
		return -ENOMEM;

	return 0;
}
1585 
/*
 * Probe: create the device object, map the registers, set up the
 * interrupts, enable MRPC completion and link-state events, then
 * register the character device.  Devices matched via their bridge
 * class code additionally trigger an asynchronous load of the
 * ntb_hw_switchtec driver.
 */
static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	/* generation (GEN3/GEN4) is encoded in the match table */
	stdev->gen = id->driver_data;

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	/* arm the MRPC completion event before userspace can submit */
	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	if (stdev->dma_mrpc)
		enable_dma_mrpc(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}
1634 
/*
 * Remove: unregister the char device before killing the stdev so no
 * new users can open it, then drop the probe reference; the object is
 * freed in stdev_release() once all references are gone.
 */
static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");
	stdev_kill(stdev);
	put_device(&stdev->dev);
}
1647 
/*
 * Each Switchtec part is matched twice: once with the "other memory
 * controller" class code (the management endpoint) and once with the
 * "other bridge" class code (the NTB endpoint); both entries carry the
 * hardware generation in driver_data.
 */
#define SWITCHTEC_PCI_DEVICE(device_id, gen) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}
1667 
/* Supported parts: Gen3 (0x85xx) and Gen4 (0x4xxx) device IDs */
static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3),  //PSX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3),  //PSX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3),  //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3),  //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3),  //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3),  //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3),  //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3),  //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3),  //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3),  //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3),  //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3),  //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3),  //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3),  //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3),  //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3),  //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3),  //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3),  //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3),  //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3),  //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3),  //PFXI 96XG3
	SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4),  //PFX 100XG4
	SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4),  //PFX 84XG4
	SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4),  //PFX 68XG4
	SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4),  //PFX 52XG4
	SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4),  //PFX 36XG4
	SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4),  //PFX 28XG4
	SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4),  //PSX 100XG4
	SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4),  //PSX 84XG4
	SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4),  //PSX 68XG4
	SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4),  //PSX 52XG4
	SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4),  //PSX 36XG4
	SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4),  //PSX 28XG4
	SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4),  //PAX 100XG4
	SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4),  //PAX 84XG4
	SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4),  //PAX 68XG4
	SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4),  //PAX 52XG4
	SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4),  //PAX 36XG4
	SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4),  //PAX 28XG4
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
1720 
/* PCI driver glue: match table plus probe/remove callbacks */
static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};
1727 
/*
 * Module init: reserve a char-device major (max_devices minors),
 * create the "switchtec" class, then register the PCI driver.
 * Resources are unwound in reverse order on failure (goto cleanup).
 */
static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);
1760 
/*
 * Module exit: tear down in reverse of switchtec_init() — the PCI
 * driver first (which removes all devices), then the class, the
 * char-device region, and finally the minor-number IDA.
 */
static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);
1771