xref: /linux/drivers/staging/axis-fifo/axis-fifo.c (revision db10cb9b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx AXIS FIFO: interface to the Xilinx AXI-Stream FIFO IP core
4  *
5  * Copyright (C) 2018 Jacob Feder
6  *
7  * Authors:  Jacob Feder <jacobsfeder@gmail.com>
8  *
9  * See Xilinx PG080 document for IP details
10  */
11 
12 /* ----------------------------
13  *           includes
14  * ----------------------------
15  */
16 
17 #include <linux/kernel.h>
18 #include <linux/of.h>
19 #include <linux/platform_device.h>
20 #include <linux/wait.h>
21 #include <linux/mutex.h>
22 #include <linux/device.h>
23 #include <linux/cdev.h>
24 #include <linux/init.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27 #include <linux/io.h>
28 #include <linux/moduleparam.h>
29 #include <linux/interrupt.h>
30 #include <linux/param.h>
31 #include <linux/fs.h>
32 #include <linux/types.h>
33 #include <linux/uaccess.h>
34 #include <linux/jiffies.h>
35 #include <linux/miscdevice.h>
36 
37 /* ----------------------------
38  *       driver parameters
39  * ----------------------------
40  */
41 
42 #define DRIVER_NAME "axis_fifo"
43 
44 #define READ_BUF_SIZE 128U /* read buffer length in words */
45 #define WRITE_BUF_SIZE 128U /* write buffer length in words */
46 
47 /* ----------------------------
48  *     IP register offsets
49  * ----------------------------
50  */
51 
52 #define XLLF_ISR_OFFSET  0x00000000  /* Interrupt Status */
53 #define XLLF_IER_OFFSET  0x00000004  /* Interrupt Enable */
54 
55 #define XLLF_TDFR_OFFSET 0x00000008  /* Transmit Reset */
56 #define XLLF_TDFV_OFFSET 0x0000000c  /* Transmit Vacancy */
57 #define XLLF_TDFD_OFFSET 0x00000010  /* Transmit Data */
58 #define XLLF_TLR_OFFSET  0x00000014  /* Transmit Length */
59 
60 #define XLLF_RDFR_OFFSET 0x00000018  /* Receive Reset */
61 #define XLLF_RDFO_OFFSET 0x0000001c  /* Receive Occupancy */
62 #define XLLF_RDFD_OFFSET 0x00000020  /* Receive Data */
63 #define XLLF_RLR_OFFSET  0x00000024  /* Receive Length */
64 #define XLLF_SRR_OFFSET  0x00000028  /* Local Link Reset */
65 #define XLLF_TDR_OFFSET  0x0000002C  /* Transmit Destination */
66 #define XLLF_RDR_OFFSET  0x00000030  /* Receive Destination */
67 
68 /* ----------------------------
69  *     reset register masks
70  * ----------------------------
71  */
72 
73 #define XLLF_RDFR_RESET_MASK        0x000000a5 /* receive reset value */
74 #define XLLF_TDFR_RESET_MASK        0x000000a5 /* transmit reset value */
75 #define XLLF_SRR_RESET_MASK         0x000000a5 /* local link reset value */
76 
77 /* ----------------------------
78  *       interrupt masks
79  * ----------------------------
80  */
81 
82 #define XLLF_INT_RPURE_MASK       0x80000000 /* Receive under-read */
83 #define XLLF_INT_RPORE_MASK       0x40000000 /* Receive over-read */
84 #define XLLF_INT_RPUE_MASK        0x20000000 /* Receive underrun (empty) */
85 #define XLLF_INT_TPOE_MASK        0x10000000 /* Transmit overrun */
86 #define XLLF_INT_TC_MASK          0x08000000 /* Transmit complete */
87 #define XLLF_INT_RC_MASK          0x04000000 /* Receive complete */
88 #define XLLF_INT_TSE_MASK         0x02000000 /* Transmit length mismatch */
89 #define XLLF_INT_TRC_MASK         0x01000000 /* Transmit reset complete */
90 #define XLLF_INT_RRC_MASK         0x00800000 /* Receive reset complete */
91 #define XLLF_INT_TFPF_MASK        0x00400000 /* Tx FIFO Programmable Full */
92 #define XLLF_INT_TFPE_MASK        0x00200000 /* Tx FIFO Programmable Empty */
93 #define XLLF_INT_RFPF_MASK        0x00100000 /* Rx FIFO Programmable Full */
94 #define XLLF_INT_RFPE_MASK        0x00080000 /* Rx FIFO Programmable Empty */
95 #define XLLF_INT_ALL_MASK         0xfff80000 /* All the ints */
96 #define XLLF_INT_ERROR_MASK       0xf2000000 /* Error status ints */
97 #define XLLF_INT_RXERROR_MASK     0xe0000000 /* Receive Error status ints */
98 #define XLLF_INT_TXERROR_MASK     0x12000000 /* Transmit Error status ints */
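
/*
 * Note: ISR bits are write-one-to-clear, so an interrupt is acknowledged by
 * writing its mask back to the ISR (see Xilinx PG080).
 */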
99 
100 /* ----------------------------
101  *           globals
102  * ----------------------------
103  */
104 static long read_timeout = 1000; /* ms to wait before read() times out */
105 static long write_timeout = 1000; /* ms to wait before write() times out */
106 
107 /* ----------------------------
108  * module command-line arguments
109  * ----------------------------
110  */
111 
112 module_param(read_timeout, long, 0444);
113 MODULE_PARM_DESC(read_timeout, "ms to wait before a blocking read() times out; set to -1 for no timeout");
114 module_param(write_timeout, long, 0444);
115 MODULE_PARM_DESC(write_timeout, "ms to wait before a blocking write() times out; set to -1 for no timeout");
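
/*
 * Example module load (values are illustrative):
 *	insmod axis-fifo.ko read_timeout=2000 write_timeout=-1
 * The effective values can be inspected at runtime under
 * /sys/module/axis_fifo/parameters/ (read-only, 0444).
 */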
116 
117 /* ----------------------------
118  *            types
119  * ----------------------------
120  */
121 
122 struct axis_fifo {
123 	int irq; /* interrupt */
124 	void __iomem *base_addr; /* kernel space memory */
125 
126 	unsigned int rx_fifo_depth; /* max words in the receive fifo */
127 	unsigned int tx_fifo_depth; /* max words in the transmit fifo */
128 	unsigned int has_rx_fifo; /* whether the IP has the rx fifo enabled */
129 	unsigned int has_tx_fifo; /* whether the IP has the tx fifo enabled */
130 
131 	wait_queue_head_t read_queue; /* wait queue for asynchronous read */
132 	struct mutex read_lock; /* lock for reading */
133 	wait_queue_head_t write_queue; /* wait queue for asynchronous write */
134 	struct mutex write_lock; /* lock for writing */
135 	unsigned int write_flags; /* write file flags */
136 	unsigned int read_flags; /* read file flags */
137 
138 	struct device *dt_device; /* device created from the device tree */
139 	struct miscdevice miscdev;
140 };
141 
142 /* ----------------------------
143  *         sysfs entries
144  * ----------------------------
145  */
146 
147 static ssize_t sysfs_write(struct device *dev, const char *buf,
148 			   size_t count, unsigned int addr_offset)
149 {
150 	struct axis_fifo *fifo = dev_get_drvdata(dev);
151 	unsigned long tmp;
152 	int rc;
153 
154 	rc = kstrtoul(buf, 0, &tmp);
155 	if (rc < 0)
156 		return rc;
157 
158 	iowrite32(tmp, fifo->base_addr + addr_offset);
159 
160 	return count;
161 }
162 
163 static ssize_t sysfs_read(struct device *dev, char *buf,
164 			  unsigned int addr_offset)
165 {
166 	struct axis_fifo *fifo = dev_get_drvdata(dev);
167 	unsigned int read_val;
168 	unsigned int len;
169 	char tmp[32];
170 
171 	read_val = ioread32(fifo->base_addr + addr_offset);
172 	len = snprintf(tmp, sizeof(tmp), "0x%x\n", read_val);
173 	memcpy(buf, tmp, len);
174 
175 	return len;
176 }
177 
178 static ssize_t isr_store(struct device *dev, struct device_attribute *attr,
179 			 const char *buf, size_t count)
180 {
181 	return sysfs_write(dev, buf, count, XLLF_ISR_OFFSET);
182 }
183 
184 static ssize_t isr_show(struct device *dev,
185 			struct device_attribute *attr, char *buf)
186 {
187 	return sysfs_read(dev, buf, XLLF_ISR_OFFSET);
188 }
189 
190 static DEVICE_ATTR_RW(isr);
191 
192 static ssize_t ier_store(struct device *dev, struct device_attribute *attr,
193 			 const char *buf, size_t count)
194 {
195 	return sysfs_write(dev, buf, count, XLLF_IER_OFFSET);
196 }
197 
198 static ssize_t ier_show(struct device *dev,
199 			struct device_attribute *attr, char *buf)
200 {
201 	return sysfs_read(dev, buf, XLLF_IER_OFFSET);
202 }
203 
204 static DEVICE_ATTR_RW(ier);
205 
206 static ssize_t tdfr_store(struct device *dev, struct device_attribute *attr,
207 			  const char *buf, size_t count)
208 {
209 	return sysfs_write(dev, buf, count, XLLF_TDFR_OFFSET);
210 }
211 
212 static DEVICE_ATTR_WO(tdfr);
213 
214 static ssize_t tdfv_show(struct device *dev,
215 			 struct device_attribute *attr, char *buf)
216 {
217 	return sysfs_read(dev, buf, XLLF_TDFV_OFFSET);
218 }
219 
220 static DEVICE_ATTR_RO(tdfv);
221 
222 static ssize_t tdfd_store(struct device *dev, struct device_attribute *attr,
223 			  const char *buf, size_t count)
224 {
225 	return sysfs_write(dev, buf, count, XLLF_TDFD_OFFSET);
226 }
227 
228 static DEVICE_ATTR_WO(tdfd);
229 
230 static ssize_t tlr_store(struct device *dev, struct device_attribute *attr,
231 			 const char *buf, size_t count)
232 {
233 	return sysfs_write(dev, buf, count, XLLF_TLR_OFFSET);
234 }
235 
236 static DEVICE_ATTR_WO(tlr);
237 
238 static ssize_t rdfr_store(struct device *dev, struct device_attribute *attr,
239 			  const char *buf, size_t count)
240 {
241 	return sysfs_write(dev, buf, count, XLLF_RDFR_OFFSET);
242 }
243 
244 static DEVICE_ATTR_WO(rdfr);
245 
246 static ssize_t rdfo_show(struct device *dev,
247 			 struct device_attribute *attr, char *buf)
248 {
249 	return sysfs_read(dev, buf, XLLF_RDFO_OFFSET);
250 }
251 
252 static DEVICE_ATTR_RO(rdfo);
253 
254 static ssize_t rdfd_show(struct device *dev,
255 			 struct device_attribute *attr, char *buf)
256 {
257 	return sysfs_read(dev, buf, XLLF_RDFD_OFFSET);
258 }
259 
260 static DEVICE_ATTR_RO(rdfd);
261 
262 static ssize_t rlr_show(struct device *dev,
263 			struct device_attribute *attr, char *buf)
264 {
265 	return sysfs_read(dev, buf, XLLF_RLR_OFFSET);
266 }
267 
268 static DEVICE_ATTR_RO(rlr);
269 
270 static ssize_t srr_store(struct device *dev, struct device_attribute *attr,
271 			 const char *buf, size_t count)
272 {
273 	return sysfs_write(dev, buf, count, XLLF_SRR_OFFSET);
274 }
275 
276 static DEVICE_ATTR_WO(srr);
277 
278 static ssize_t tdr_store(struct device *dev, struct device_attribute *attr,
279 			 const char *buf, size_t count)
280 {
281 	return sysfs_write(dev, buf, count, XLLF_TDR_OFFSET);
282 }
283 
284 static DEVICE_ATTR_WO(tdr);
285 
286 static ssize_t rdr_show(struct device *dev,
287 			struct device_attribute *attr, char *buf)
288 {
289 	return sysfs_read(dev, buf, XLLF_RDR_OFFSET);
290 }
291 
292 static DEVICE_ATTR_RO(rdr);
293 
294 static struct attribute *axis_fifo_attrs[] = {
295 	&dev_attr_isr.attr,
296 	&dev_attr_ier.attr,
297 	&dev_attr_tdfr.attr,
298 	&dev_attr_tdfv.attr,
299 	&dev_attr_tdfd.attr,
300 	&dev_attr_tlr.attr,
301 	&dev_attr_rdfr.attr,
302 	&dev_attr_rdfo.attr,
303 	&dev_attr_rdfd.attr,
304 	&dev_attr_rlr.attr,
305 	&dev_attr_srr.attr,
306 	&dev_attr_tdr.attr,
307 	&dev_attr_rdr.attr,
308 	NULL,
309 };
310 
311 static const struct attribute_group axis_fifo_attrs_group = {
312 	.name = "ip_registers",
313 	.attrs = axis_fifo_attrs,
314 };
315 
316 static const struct attribute_group *axis_fifo_attrs_groups[] = {
317 	&axis_fifo_attrs_group,
318 	NULL,
319 };
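
/*
 * These attributes expose raw register access under the misc device's sysfs
 * directory, e.g. (paths are examples; the device name depends on the core's
 * physical address):
 *	cat /sys/class/misc/axis_fifo_<addr>/ip_registers/isr
 *	cat /sys/class/misc/axis_fifo_<addr>/ip_registers/tdfv
 */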
320 
321 /* ----------------------------
322  *        implementation
323  * ----------------------------
324  */
325 
326 static void reset_ip_core(struct axis_fifo *fifo)
327 {
328 	iowrite32(XLLF_SRR_RESET_MASK, fifo->base_addr + XLLF_SRR_OFFSET);
329 	iowrite32(XLLF_TDFR_RESET_MASK, fifo->base_addr + XLLF_TDFR_OFFSET);
330 	iowrite32(XLLF_RDFR_RESET_MASK, fifo->base_addr + XLLF_RDFR_OFFSET);
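	/* enable 'transmit complete', 'receive complete' and error interrupts */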
331 	iowrite32(XLLF_INT_TC_MASK | XLLF_INT_RC_MASK | XLLF_INT_RPURE_MASK |
332 		  XLLF_INT_RPORE_MASK | XLLF_INT_RPUE_MASK |
333 		  XLLF_INT_TPOE_MASK | XLLF_INT_TSE_MASK,
334 		  fifo->base_addr + XLLF_IER_OFFSET);
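	/* clear any pending interrupts */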
335 	iowrite32(XLLF_INT_ALL_MASK, fifo->base_addr + XLLF_ISR_OFFSET);
336 }
337 
338 /**
339  * axis_fifo_read() - Read a packet from AXIS-FIFO character device.
340  * @f: Open file.
341  * @buf: User space buffer to read into.
342  * @len: User space buffer length.
343  * @off: Buffer offset.
344  *
345  * As defined by the device's documentation, we need to check the device's
346  * occupancy before reading the length register and then the data. All these
347  * operations must be executed atomically, in order, one after the other,
348  * without skipping any of them.
349  *
350  * Return: The number of bytes read from the device, or a negative error code
351  *	on failure.
352  */
353 static ssize_t axis_fifo_read(struct file *f, char __user *buf,
354 			      size_t len, loff_t *off)
355 {
356 	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
357 	size_t bytes_available;
358 	unsigned int words_available;
359 	unsigned int copied;
360 	unsigned int copy;
361 	unsigned int i;
362 	int ret;
363 	u32 tmp_buf[READ_BUF_SIZE];
364 
365 	if (fifo->read_flags & O_NONBLOCK) {
366 		/*
367 		 * Device opened in non-blocking mode. Try to lock it and then
368 		 * check if any packet is available.
369 		 */
370 		if (!mutex_trylock(&fifo->read_lock))
371 			return -EAGAIN;
372 
373 		if (!ioread32(fifo->base_addr + XLLF_RDFO_OFFSET)) {
374 			ret = -EAGAIN;
375 			goto end_unlock;
376 		}
377 	} else {
378 		/* opened in blocking mode
379 		 * wait for a packet available interrupt (or timeout)
380 		 * if nothing is currently available
381 		 */
382 		mutex_lock(&fifo->read_lock);
383 		ret = wait_event_interruptible_timeout(fifo->read_queue,
384 			ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
385 			read_timeout);
386 
387 		if (ret <= 0) {
388 			if (ret == 0) {
389 				ret = -EAGAIN;
390 			} else if (ret != -ERESTARTSYS) {
391 				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in read (ret=%i)\n",
392 					ret);
393 			}
394 
395 			goto end_unlock;
396 		}
397 	}
398 
399 	bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
400 	if (!bytes_available) {
401 		dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
402 		reset_ip_core(fifo);
403 		ret = -EIO;
404 		goto end_unlock;
405 	}
406 
407 	if (bytes_available > len) {
408 		dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
409 			bytes_available, len);
410 		reset_ip_core(fifo);
411 		ret = -EINVAL;
412 		goto end_unlock;
413 	}
414 
415 	if (bytes_available % sizeof(u32)) {
416 		/* this probably can't happen unless IP
417 		 * registers were previously mishandled
418 		 */
419 		dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
420 		reset_ip_core(fifo);
421 		ret = -EIO;
422 		goto end_unlock;
423 	}
424 
425 	words_available = bytes_available / sizeof(u32);
426 
427 	/* read data into an intermediate buffer, copying the contents
428 	 * to userspace when the buffer is full
429 	 */
430 	copied = 0;
431 	while (words_available > 0) {
432 		copy = min(words_available, READ_BUF_SIZE);
433 
434 		for (i = 0; i < copy; i++) {
435 			tmp_buf[i] = ioread32(fifo->base_addr +
436 					      XLLF_RDFD_OFFSET);
437 		}
438 
439 		if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
440 				 copy * sizeof(u32))) {
441 			reset_ip_core(fifo);
442 			ret = -EFAULT;
443 			goto end_unlock;
444 		}
445 
446 		copied += copy;
447 		words_available -= copy;
448 	}
449 
450 	ret = bytes_available;
451 
452 end_unlock:
453 	mutex_unlock(&fifo->read_lock);
454 
455 	return ret;
456 }
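
/*
 * Illustrative userspace usage (not part of the driver; the device path and
 * buffer size are assumptions). Each read() returns exactly one packet, so
 * the buffer must be large enough for the largest expected packet:
 *
 *	int fd = open("/dev/axis_fifo_<addr>", O_RDONLY);
 *	uint32_t pkt[256];
 *	ssize_t n = read(fd, pkt, sizeof(pkt));
 *
 * On failure read() returns -1 with errno set, e.g. EAGAIN on timeout or
 * EINVAL when the supplied buffer is smaller than the received packet.
 */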
457 
458 /**
459  * axis_fifo_write() - Write buffer to AXIS-FIFO character device.
460  * @f: Open file.
461  * @buf: User space buffer to write to the device.
462  * @len: User space buffer length.
463  * @off: Buffer offset.
464  *
465  * As defined by the device's documentation, we need to write to the device's
466  * data buffer and then to its packet length register atomically. We also
467  * need to take the lock before checking whether the device has enough free
468  * space, to avoid concurrency issues.
469  *
470  * Return: The number of bytes written to the device, or a negative error code
471  *	on failure.
472  */
473 static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
474 			       size_t len, loff_t *off)
475 {
476 	struct axis_fifo *fifo = (struct axis_fifo *)f->private_data;
477 	unsigned int words_to_write;
478 	unsigned int copied;
479 	unsigned int copy;
480 	unsigned int i;
481 	int ret;
482 	u32 tmp_buf[WRITE_BUF_SIZE];
483 
484 	if (len % sizeof(u32)) {
485 		dev_err(fifo->dt_device,
486 			"tried to send a packet that isn't word-aligned\n");
487 		return -EINVAL;
488 	}
489 
490 	words_to_write = len / sizeof(u32);
491 
492 	if (!words_to_write) {
493 		dev_err(fifo->dt_device,
494 			"tried to send a packet of length 0\n");
495 		return -EINVAL;
496 	}
497 
498 	if (words_to_write > fifo->tx_fifo_depth) {
499 		dev_err(fifo->dt_device, "tried to write more words [%u] than slots in the fifo buffer [%u]\n",
500 			words_to_write, fifo->tx_fifo_depth);
501 		return -EINVAL;
502 	}
503 
504 	if (fifo->write_flags & O_NONBLOCK) {
505 		/*
506 		 * Device opened in non-blocking mode. Try to lock it and then
507 		 * check if there is any room to write the given buffer.
508 		 */
509 		if (!mutex_trylock(&fifo->write_lock))
510 			return -EAGAIN;
511 
512 		if (words_to_write > ioread32(fifo->base_addr +
513 					      XLLF_TDFV_OFFSET)) {
514 			ret = -EAGAIN;
515 			goto end_unlock;
516 		}
517 	} else {
518 		/* opened in blocking mode */
519 
520 		/* wait for an interrupt (or timeout) if there isn't
521 		 * currently enough room in the fifo
522 		 */
523 		mutex_lock(&fifo->write_lock);
524 		ret = wait_event_interruptible_timeout(fifo->write_queue,
525 			ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
526 				 >= words_to_write,
527 			write_timeout);
528 
529 		if (ret <= 0) {
530 			if (ret == 0) {
531 				ret = -EAGAIN;
532 			} else if (ret != -ERESTARTSYS) {
533 				dev_err(fifo->dt_device, "wait_event_interruptible_timeout() error in write (ret=%i)\n",
534 					ret);
535 			}
536 
537 			goto end_unlock;
538 		}
539 	}
540 
541 	/* write data from an intermediate buffer into the fifo IP, refilling
542 	 * the buffer with userspace data as needed
543 	 */
544 	copied = 0;
545 	while (words_to_write > 0) {
546 		copy = min(words_to_write, WRITE_BUF_SIZE);
547 
548 		if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
549 				   copy * sizeof(u32))) {
550 			reset_ip_core(fifo);
551 			ret = -EFAULT;
552 			goto end_unlock;
553 		}
554 
555 		for (i = 0; i < copy; i++)
556 			iowrite32(tmp_buf[i], fifo->base_addr +
557 				  XLLF_TDFD_OFFSET);
558 
559 		copied += copy;
560 		words_to_write -= copy;
561 	}
562 
563 	ret = copied * sizeof(u32);
564 
565 	/* write packet size to fifo */
566 	iowrite32(ret, fifo->base_addr + XLLF_TLR_OFFSET);
567 
568 end_unlock:
569 	mutex_unlock(&fifo->write_lock);
570 
571 	return ret;
572 }
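
/*
 * Illustrative userspace usage (not part of the driver; the device path is
 * an assumption). Each write() queues one packet; its length must be a
 * non-zero multiple of 4 bytes and must fit in the transmit fifo:
 *
 *	int fd = open("/dev/axis_fifo_<addr>", O_WRONLY);
 *	uint32_t pkt[64] = { 0 };
 *	ssize_t n = write(fd, pkt, sizeof(pkt));
 *
 * On failure write() returns -1 with errno set, e.g. EINVAL for a bad length
 * or EAGAIN if the fifo does not free up space before the timeout.
 */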
573 
574 static irqreturn_t axis_fifo_irq(int irq, void *dw)
575 {
576 	struct axis_fifo *fifo = (struct axis_fifo *)dw;
577 	unsigned int pending_interrupts;
578 
579 	do {
580 		pending_interrupts = ioread32(fifo->base_addr +
581 					      XLLF_IER_OFFSET) &
582 					      ioread32(fifo->base_addr
583 					      + XLLF_ISR_OFFSET);
584 		if (pending_interrupts & XLLF_INT_RC_MASK) {
585 			/* packet received */
586 
587 			/* wake the reader process if it is waiting */
588 			wake_up(&fifo->read_queue);
589 
590 			/* clear interrupt */
591 			iowrite32(XLLF_INT_RC_MASK & XLLF_INT_ALL_MASK,
592 				  fifo->base_addr + XLLF_ISR_OFFSET);
593 		} else if (pending_interrupts & XLLF_INT_TC_MASK) {
594 			/* packet sent */
595 
596 			/* wake the writer process if it is waiting */
597 			wake_up(&fifo->write_queue);
598 
599 			iowrite32(XLLF_INT_TC_MASK & XLLF_INT_ALL_MASK,
600 				  fifo->base_addr + XLLF_ISR_OFFSET);
601 		} else if (pending_interrupts & XLLF_INT_TFPF_MASK) {
602 			/* transmit fifo programmable full */
603 
604 			iowrite32(XLLF_INT_TFPF_MASK & XLLF_INT_ALL_MASK,
605 				  fifo->base_addr + XLLF_ISR_OFFSET);
606 		} else if (pending_interrupts & XLLF_INT_TFPE_MASK) {
607 			/* transmit fifo programmable empty */
608 
609 			iowrite32(XLLF_INT_TFPE_MASK & XLLF_INT_ALL_MASK,
610 				  fifo->base_addr + XLLF_ISR_OFFSET);
611 		} else if (pending_interrupts & XLLF_INT_RFPF_MASK) {
612 			/* receive fifo programmable full */
613 
614 			iowrite32(XLLF_INT_RFPF_MASK & XLLF_INT_ALL_MASK,
615 				  fifo->base_addr + XLLF_ISR_OFFSET);
616 		} else if (pending_interrupts & XLLF_INT_RFPE_MASK) {
617 			/* receive fifo programmable empty */
618 
619 			iowrite32(XLLF_INT_RFPE_MASK & XLLF_INT_ALL_MASK,
620 				  fifo->base_addr + XLLF_ISR_OFFSET);
621 		} else if (pending_interrupts & XLLF_INT_TRC_MASK) {
622 			/* transmit reset complete interrupt */
623 
624 			iowrite32(XLLF_INT_TRC_MASK & XLLF_INT_ALL_MASK,
625 				  fifo->base_addr + XLLF_ISR_OFFSET);
626 		} else if (pending_interrupts & XLLF_INT_RRC_MASK) {
627 			/* receive reset complete interrupt */
628 
629 			iowrite32(XLLF_INT_RRC_MASK & XLLF_INT_ALL_MASK,
630 				  fifo->base_addr + XLLF_ISR_OFFSET);
631 		} else if (pending_interrupts & XLLF_INT_RPURE_MASK) {
632 			/* receive fifo under-read error interrupt */
633 			dev_err(fifo->dt_device,
634 				"receive under-read interrupt\n");
635 
636 			iowrite32(XLLF_INT_RPURE_MASK & XLLF_INT_ALL_MASK,
637 				  fifo->base_addr + XLLF_ISR_OFFSET);
638 		} else if (pending_interrupts & XLLF_INT_RPORE_MASK) {
639 			/* receive over-read error interrupt */
640 			dev_err(fifo->dt_device,
641 				"receive over-read interrupt\n");
642 
643 			iowrite32(XLLF_INT_RPORE_MASK & XLLF_INT_ALL_MASK,
644 				  fifo->base_addr + XLLF_ISR_OFFSET);
645 		} else if (pending_interrupts & XLLF_INT_RPUE_MASK) {
646 			/* receive underrun error interrupt */
647 			dev_err(fifo->dt_device,
648 				"receive underrun error interrupt\n");
649 
650 			iowrite32(XLLF_INT_RPUE_MASK & XLLF_INT_ALL_MASK,
651 				  fifo->base_addr + XLLF_ISR_OFFSET);
652 		} else if (pending_interrupts & XLLF_INT_TPOE_MASK) {
653 			/* transmit overrun error interrupt */
654 			dev_err(fifo->dt_device,
655 				"transmit overrun error interrupt\n");
656 
657 			iowrite32(XLLF_INT_TPOE_MASK & XLLF_INT_ALL_MASK,
658 				  fifo->base_addr + XLLF_ISR_OFFSET);
659 		} else if (pending_interrupts & XLLF_INT_TSE_MASK) {
660 			/* transmit length mismatch error interrupt */
661 			dev_err(fifo->dt_device,
662 				"transmit length mismatch error interrupt\n");
663 
664 			iowrite32(XLLF_INT_TSE_MASK & XLLF_INT_ALL_MASK,
665 				  fifo->base_addr + XLLF_ISR_OFFSET);
666 		} else if (pending_interrupts) {
667 			/* unknown interrupt type */
668 			dev_err(fifo->dt_device,
669 				"unknown interrupt(s) 0x%x\n",
670 				pending_interrupts);
671 
672 			iowrite32(XLLF_INT_ALL_MASK,
673 				  fifo->base_addr + XLLF_ISR_OFFSET);
674 		}
675 	} while (pending_interrupts);
676 
677 	return IRQ_HANDLED;
678 }
679 
680 static int axis_fifo_open(struct inode *inod, struct file *f)
681 {
682 	struct axis_fifo *fifo = container_of(f->private_data,
683 					      struct axis_fifo, miscdev);
684 	f->private_data = fifo;
685 
686 	if (((f->f_flags & O_ACCMODE) == O_WRONLY) ||
687 	    ((f->f_flags & O_ACCMODE) == O_RDWR)) {
688 		if (fifo->has_tx_fifo) {
689 			fifo->write_flags = f->f_flags;
690 		} else {
691 			dev_err(fifo->dt_device, "tried to open device for write but the transmit fifo is disabled\n");
692 			return -EPERM;
693 		}
694 	}
695 
696 	if (((f->f_flags & O_ACCMODE) == O_RDONLY) ||
697 	    ((f->f_flags & O_ACCMODE) == O_RDWR)) {
698 		if (fifo->has_rx_fifo) {
699 			fifo->read_flags = f->f_flags;
700 		} else {
701 			dev_err(fifo->dt_device, "tried to open device for read but the receive fifo is disabled\n");
702 			return -EPERM;
703 		}
704 	}
705 
706 	return 0;
707 }
708 
709 static int axis_fifo_close(struct inode *inod, struct file *f)
710 {
711 	f->private_data = NULL;
712 
713 	return 0;
714 }
715 
716 static const struct file_operations fops = {
717 	.owner = THIS_MODULE,
718 	.open = axis_fifo_open,
719 	.release = axis_fifo_close,
720 	.read = axis_fifo_read,
721 	.write = axis_fifo_write
722 };
723 
724 /* read named property from the device tree */
725 static int get_dts_property(struct axis_fifo *fifo,
726 			    char *name, unsigned int *var)
727 {
728 	int rc;
729 
730 	rc = of_property_read_u32(fifo->dt_device->of_node, name, var);
731 	if (rc) {
732 		dev_err(fifo->dt_device, "couldn't read IP dts property '%s'\n",
733 			name);
734 		return rc;
735 	}
736 	dev_dbg(fifo->dt_device, "dts property '%s' = %u\n",
737 		name, *var);
738 
739 	return 0;
740 }
741 
742 static int axis_fifo_parse_dt(struct axis_fifo *fifo)
743 {
744 	int ret;
745 	unsigned int value;
746 
747 	ret = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width", &value);
748 	if (ret) {
749 		dev_err(fifo->dt_device, "missing xlnx,axi-str-rxd-tdata-width property\n");
750 		goto end;
751 	} else if (value != 32) {
752 		dev_err(fifo->dt_device, "xlnx,axi-str-rxd-tdata-width only supports 32 bits\n");
753 		ret = -EIO;
754 		goto end;
755 	}
756 
757 	ret = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width", &value);
758 	if (ret) {
759 		dev_err(fifo->dt_device, "missing xlnx,axi-str-txd-tdata-width property\n");
760 		goto end;
761 	} else if (value != 32) {
762 		dev_err(fifo->dt_device, "xlnx,axi-str-txd-tdata-width only supports 32 bits\n");
763 		ret = -EIO;
764 		goto end;
765 	}
766 
767 	ret = get_dts_property(fifo, "xlnx,rx-fifo-depth",
768 			       &fifo->rx_fifo_depth);
769 	if (ret) {
770 		dev_err(fifo->dt_device, "missing xlnx,rx-fifo-depth property\n");
771 		ret = -EIO;
772 		goto end;
773 	}
774 
775 	ret = get_dts_property(fifo, "xlnx,tx-fifo-depth",
776 			       &fifo->tx_fifo_depth);
777 	if (ret) {
778 		dev_err(fifo->dt_device, "missing xlnx,tx-fifo-depth property\n");
779 		ret = -EIO;
780 		goto end;
781 	}
782 
783 	/* IP sets TDFV to fifo depth - 4 so we will do the same */
784 	fifo->tx_fifo_depth -= 4;
785 
786 	ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
787 	if (ret) {
788 		dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
789 		ret = -EIO;
790 		goto end;
791 	}
792 
793 	ret = get_dts_property(fifo, "xlnx,use-tx-data", &fifo->has_tx_fifo);
794 	if (ret) {
795 		dev_err(fifo->dt_device, "missing xlnx,use-tx-data property\n");
796 		ret = -EIO;
797 		goto end;
798 	}
799 
800 end:
801 	return ret;
802 }
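
/*
 * Example device-tree node (values are illustrative; only the properties
 * used by this driver plus the standard reg/interrupts are shown):
 *
 *	axi_fifo_mm_s@43c00000 {
 *		compatible = "xlnx,axi-fifo-mm-s-4.1";
 *		reg = <0x43c00000 0x10000>;
 *		interrupt-parent = <&intc>;
 *		interrupts = <0 29 4>;
 *		xlnx,axi-str-rxd-tdata-width = <32>;
 *		xlnx,axi-str-txd-tdata-width = <32>;
 *		xlnx,rx-fifo-depth = <512>;
 *		xlnx,tx-fifo-depth = <512>;
 *		xlnx,use-rx-data = <1>;
 *		xlnx,use-tx-data = <1>;
 *	};
 */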
803 
804 static int axis_fifo_probe(struct platform_device *pdev)
805 {
806 	struct resource *r_mem; /* IO mem resources */
807 	struct device *dev = &pdev->dev; /* OS device (from device tree) */
808 	struct axis_fifo *fifo = NULL;
809 	char *device_name;
810 	int rc = 0; /* error return value */
811 
812 	/* ----------------------------
813 	 *     init wrapper device
814 	 * ----------------------------
815 	 */
816 
817 	device_name = devm_kzalloc(dev, 32, GFP_KERNEL);
818 	if (!device_name)
819 		return -ENOMEM;
820 
821 	/* allocate device wrapper memory */
822 	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
823 	if (!fifo)
824 		return -ENOMEM;
825 
826 	dev_set_drvdata(dev, fifo);
827 	fifo->dt_device = dev;
828 
829 	init_waitqueue_head(&fifo->read_queue);
830 	init_waitqueue_head(&fifo->write_queue);
831 
832 	mutex_init(&fifo->read_lock);
833 	mutex_init(&fifo->write_lock);
834 
835 	/* ----------------------------
836 	 *   init device memory space
837 	 * ----------------------------
838 	 */
839 
840 	/* get iospace for the device and request physical memory */
841 	fifo->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem);
842 	if (IS_ERR(fifo->base_addr)) {
843 		rc = PTR_ERR(fifo->base_addr);
844 		goto err_initial;
845 	}
846 
847 	dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr);
848 
849 	/* create unique device name */
850 	snprintf(device_name, 32, "%s_%pa", DRIVER_NAME, &r_mem->start);
851 	dev_dbg(fifo->dt_device, "device name [%s]\n", device_name);
852 
853 	/* ----------------------------
854 	 *          init IP
855 	 * ----------------------------
856 	 */
857 
858 	rc = axis_fifo_parse_dt(fifo);
859 	if (rc)
860 		goto err_initial;
861 
862 	reset_ip_core(fifo);
863 
864 	/* ----------------------------
865 	 *    init device interrupts
866 	 * ----------------------------
867 	 */
868 
869 	/* get IRQ resource */
870 	rc = platform_get_irq(pdev, 0);
871 	if (rc < 0)
872 		goto err_initial;
873 
874 	/* request IRQ */
875 	fifo->irq = rc;
876 	rc = devm_request_irq(fifo->dt_device, fifo->irq, &axis_fifo_irq, 0,
877 			      DRIVER_NAME, fifo);
878 	if (rc) {
879 		dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n",
880 			fifo->irq);
881 		goto err_initial;
882 	}
883 
884 	/* ----------------------------
885 	 *      init char device
886 	 * ----------------------------
887 	 */
888 
889 	/* create character device */
890 	fifo->miscdev.fops = &fops;
891 	fifo->miscdev.minor = MISC_DYNAMIC_MINOR;
892 	fifo->miscdev.name = device_name;
893 	fifo->miscdev.groups = axis_fifo_attrs_groups;
894 	fifo->miscdev.parent = dev;
895 	rc = misc_register(&fifo->miscdev);
896 	if (rc < 0)
897 		goto err_initial;
898 
899 	return 0;
900 
901 err_initial:
902 	dev_set_drvdata(dev, NULL);
903 	return rc;
904 }
905 
906 static void axis_fifo_remove(struct platform_device *pdev)
907 {
908 	struct device *dev = &pdev->dev;
909 	struct axis_fifo *fifo = dev_get_drvdata(dev);
910 
911 	misc_deregister(&fifo->miscdev);
912 	dev_set_drvdata(dev, NULL);
913 }
914 
915 static const struct of_device_id axis_fifo_of_match[] = {
916 	{ .compatible = "xlnx,axi-fifo-mm-s-4.1", },
917 	{},
918 };
919 MODULE_DEVICE_TABLE(of, axis_fifo_of_match);
920 
921 static struct platform_driver axis_fifo_driver = {
922 	.driver = {
923 		.name = DRIVER_NAME,
924 		.of_match_table	= axis_fifo_of_match,
925 	},
926 	.probe		= axis_fifo_probe,
927 	.remove_new	= axis_fifo_remove,
928 };
929 
930 static int __init axis_fifo_init(void)
931 {
932 	if (read_timeout >= 0)
933 		read_timeout = msecs_to_jiffies(read_timeout);
934 	else
935 		read_timeout = MAX_SCHEDULE_TIMEOUT;
936 
937 	if (write_timeout >= 0)
938 		write_timeout = msecs_to_jiffies(write_timeout);
939 	else
940 		write_timeout = MAX_SCHEDULE_TIMEOUT;
941 
942 	pr_info("axis-fifo driver loaded with parameters read_timeout = %li, write_timeout = %li\n",
943 		read_timeout, write_timeout);
944 	return platform_driver_register(&axis_fifo_driver);
945 }
946 
947 module_init(axis_fifo_init);
948 
949 static void __exit axis_fifo_exit(void)
950 {
951 	platform_driver_unregister(&axis_fifo_driver);
952 }
953 
954 module_exit(axis_fifo_exit);
955 
956 MODULE_LICENSE("GPL");
957 MODULE_AUTHOR("Jacob Feder <jacobsfeder@gmail.com>");
958 MODULE_DESCRIPTION("Xilinx AXI-Stream FIFO v4.1 IP core driver");
959