// SPDX-License-Identifier: GPL-2.0-only
/*
 * i2c-xiic.c
 * Copyright (c) 2002-2007 Xilinx Inc.
 * Copyright (c) 2009-2010 Intel Corporation
 *
 * This code was implemented by Mocean Laboratories AB when porting Linux
 * to the automotive development board Russellville. The copyright holder
 * as seen in the header is Intel Corporation.
 * Mocean Laboratories forked off the GNU/Linux platform work into a
 * separate company called Pelagicore AB, which committed the code to the
 * kernel.
 */

/* Supports:
 * Xilinx IIC
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_data/i2c-xiic.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#define DRIVER_NAME "xiic-i2c"

enum xilinx_i2c_state {
	STATE_DONE,
	STATE_ERROR,
	STATE_START
};

enum xiic_endian {
	LITTLE,
	BIG
};

/**
 * struct xiic_i2c - Internal representation of the XIIC I2C bus
 * @dev:	Pointer to device structure
 * @base:	Memory base of the HW registers
 * @wait:	Wait queue for callers
 * @adap:	Kernel adapter representation
 * @tx_msg:	Messages from above to be sent
 * @lock:	Mutual exclusion
 * @tx_pos:	Current pos in TX message
 * @nmsgs:	Number of messages in tx_msg
 * @state:	See STATE_
 * @rx_msg:	Current RX message
 * @rx_pos:	Position within current RX message
 * @endianness: big/little-endian byte order
 * @clk:	Pointer to AXI4-lite input clock
 */
struct xiic_i2c {
	struct device		*dev;
	void __iomem		*base;
	wait_queue_head_t	wait;
	struct i2c_adapter	adap;
	struct i2c_msg		*tx_msg;
	struct mutex		lock;
	unsigned int		tx_pos;
	unsigned int		nmsgs;
	enum xilinx_i2c_state	state;
	struct i2c_msg		*rx_msg;
	int			rx_pos;
	enum xiic_endian	endianness;
	struct clk *clk;
};

#define XIIC_MSB_OFFSET 0
#define XIIC_REG_OFFSET (0x100+XIIC_MSB_OFFSET)

/*
 * Register offsets in bytes from RegisterBase. Three is added to the
 * base offset to access the LSB (IBM style) of the word.
 */
#define XIIC_CR_REG_OFFSET   (0x00+XIIC_REG_OFFSET)	/* Control Register   */
#define XIIC_SR_REG_OFFSET   (0x04+XIIC_REG_OFFSET)	/* Status Register    */
#define XIIC_DTR_REG_OFFSET  (0x08+XIIC_REG_OFFSET)	/* Data Tx Register   */
#define XIIC_DRR_REG_OFFSET  (0x0C+XIIC_REG_OFFSET)	/* Data Rx Register   */
#define XIIC_ADR_REG_OFFSET  (0x10+XIIC_REG_OFFSET)	/* Address Register   */
#define XIIC_TFO_REG_OFFSET  (0x14+XIIC_REG_OFFSET)	/* Tx FIFO Occupancy  */
#define XIIC_RFO_REG_OFFSET  (0x18+XIIC_REG_OFFSET)	/* Rx FIFO Occupancy  */
#define XIIC_TBA_REG_OFFSET  (0x1C+XIIC_REG_OFFSET)	/* 10 Bit Address reg */
#define XIIC_RFD_REG_OFFSET  (0x20+XIIC_REG_OFFSET)	/* Rx FIFO Depth reg  */
#define XIIC_GPO_REG_OFFSET  (0x24+XIIC_REG_OFFSET)	/* Output Register    */

/* Control Register masks */
#define XIIC_CR_ENABLE_DEVICE_MASK        0x01	/* Device enable = 1      */
#define XIIC_CR_TX_FIFO_RESET_MASK        0x02	/* Transmit FIFO reset=1  */
#define XIIC_CR_MSMS_MASK                 0x04	/* Master starts Txing=1  */
#define XIIC_CR_DIR_IS_TX_MASK            0x08	/* Dir of tx. Txing=1     */
#define XIIC_CR_NO_ACK_MASK               0x10	/* Tx Ack. NO ack = 1     */
#define XIIC_CR_REPEATED_START_MASK       0x20	/* Repeated start = 1     */
#define XIIC_CR_GENERAL_CALL_MASK         0x40	/* Gen Call enabled = 1   */

/* Status Register masks */
#define XIIC_SR_GEN_CALL_MASK             0x01	/* 1=a mstr issued a GC   */
#define XIIC_SR_ADDR_AS_SLAVE_MASK        0x02	/* 1=when addr as slave   */
#define XIIC_SR_BUS_BUSY_MASK             0x04	/* 1 = bus is busy        */
#define XIIC_SR_MSTR_RDING_SLAVE_MASK     0x08	/* 1=Dir: mstr <-- slave  */
#define XIIC_SR_TX_FIFO_FULL_MASK         0x10	/* 1 = Tx FIFO full       */
#define XIIC_SR_RX_FIFO_FULL_MASK         0x20	/* 1 = Rx FIFO full       */
#define XIIC_SR_RX_FIFO_EMPTY_MASK        0x40	/* 1 = Rx FIFO empty      */
#define XIIC_SR_TX_FIFO_EMPTY_MASK        0x80	/* 1 = Tx FIFO empty      */

/* Interrupt Status Register masks    Interrupt occurs when...       */
#define XIIC_INTR_ARB_LOST_MASK           0x01	/* 1 = arbitration lost   */
#define XIIC_INTR_TX_ERROR_MASK           0x02	/* 1=Tx error/msg complete */
#define XIIC_INTR_TX_EMPTY_MASK           0x04	/* 1 = Tx FIFO/reg empty  */
#define XIIC_INTR_RX_FULL_MASK            0x08	/* 1=Rx FIFO/reg=OCY level */
#define XIIC_INTR_BNB_MASK                0x10	/* 1 = Bus not busy       */
#define XIIC_INTR_AAS_MASK                0x20	/* 1 = when addr as slave */
#define XIIC_INTR_NAAS_MASK               0x40	/* 1 = not addr as slave  */
#define XIIC_INTR_TX_HALF_MASK            0x80	/* 1 = TX FIFO half empty */

/* The following constants specify the depth of the FIFOs */
#define IIC_RX_FIFO_DEPTH         16	/* Rx fifo capacity               */
#define IIC_TX_FIFO_DEPTH         16	/* Tx fifo capacity               */

/* The following constants specify groups of interrupts that are typically
 * enabled or disabled at the same time
 */
#define XIIC_TX_INTERRUPTS                           \
(XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)

#define XIIC_TX_RX_INTERRUPTS (XIIC_INTR_RX_FULL_MASK | XIIC_TX_INTERRUPTS)

/*
 * Tx Fifo upper bit masks.
 */
#define XIIC_TX_DYN_START_MASK            0x0100 /* 1 = Set dynamic start */
#define XIIC_TX_DYN_STOP_MASK             0x0200 /* 1 = Set dynamic stop */
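
/*
 * In dynamic controller mode the transfer is described through the TX FIFO
 * itself: the word carrying the slave address has XIIC_TX_DYN_START_MASK
 * set, and the word carrying the last data byte of a write (or the byte
 * count of a read) has XIIC_TX_DYN_STOP_MASK set when it ends the transfer.
 * See xiic_start_send(), xiic_start_recv() and xiic_fill_tx_fifo() below.
 */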

/*
 * The following constants define the register offsets for the Interrupt
 * registers. There are some holes in the memory map for reserved addresses
 * to allow other registers to be added and still match the memory map of the
 * interrupt controller registers
 */
#define XIIC_DGIER_OFFSET    0x1C /* Device Global Interrupt Enable Register */
#define XIIC_IISR_OFFSET     0x20 /* Interrupt Status Register */
#define XIIC_IIER_OFFSET     0x28 /* Interrupt Enable Register */
#define XIIC_RESETR_OFFSET   0x40 /* Reset Register */

#define XIIC_RESET_MASK             0xAUL

#define XIIC_PM_TIMEOUT		1000	/* ms */
/*
 * The following constant is used for the device global interrupt enable
 * register; it enables all interrupts for the device and is the only bit
 * in the register
 */
#define XIIC_GINTR_ENABLE_MASK      0x80000000UL

#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)

static void xiic_start_xfer(struct xiic_i2c *i2c);
static void __xiic_start_xfer(struct xiic_i2c *i2c);

/*
 * For the register read and write functions, both a little-endian and a
 * big-endian version are necessary. Endianness is detected during the probe
 * function. Only the least significant byte [doublet] of the register is
 * ever accessed. This requires an offset of 3 [2] from the base address for
 * big-endian systems.
 */

static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value)
{
	if (i2c->endianness == LITTLE)
		iowrite8(value, i2c->base + reg);
	else
		iowrite8(value, i2c->base + reg + 3);
}

static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg)
{
	u8 ret;

	if (i2c->endianness == LITTLE)
		ret = ioread8(i2c->base + reg);
	else
		ret = ioread8(i2c->base + reg + 3);
	return ret;
}

static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value)
{
	if (i2c->endianness == LITTLE)
		iowrite16(value, i2c->base + reg);
	else
		iowrite16be(value, i2c->base + reg + 2);
}

static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value)
{
	if (i2c->endianness == LITTLE)
		iowrite32(value, i2c->base + reg);
	else
		iowrite32be(value, i2c->base + reg);
}

static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
{
	u32 ret;

	if (i2c->endianness == LITTLE)
		ret = ioread32(i2c->base + reg);
	else
		ret = ioread32be(i2c->base + reg);
	return ret;
}

static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask)
{
	u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);

	xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask);
}

static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask)
{
	u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);

	xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask);
}

static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask)
{
	u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);

	xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask);
}

static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask)
{
	xiic_irq_clr(i2c, mask);
	xiic_irq_en(i2c, mask);
}

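/* Read and discard bytes until the controller reports an empty RX FIFO. */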
static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
{
	u8 sr;

	for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
		!(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
		sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
		xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
}

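/*
 * Soft-reset the controller and restore the configuration needed for
 * master transfers: maximum RX watermark, TX FIFO reset released, device
 * enabled, RX FIFO drained, global interrupts enabled and the
 * arbitration-lost interrupt armed.
 */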
static void xiic_reinit(struct xiic_i2c *i2c)
{
	xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);

	/* Set receive Fifo depth to maximum (zero based). */
	xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1);

	/* Reset Tx Fifo. */
	xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);

	/* Enable IIC Device, remove Tx Fifo reset & disable general call. */
	xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);

	/* make sure RX fifo is empty */
	xiic_clear_rx_fifo(i2c);

	/* Enable interrupts */
	xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);

	xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
}

static void xiic_deinit(struct xiic_i2c *i2c)
{
	u8 cr;

	xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);

	/* Disable IIC Device. */
	cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
	xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_ENABLE_DEVICE_MASK);
}

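/*
 * Copy whatever the RX FIFO currently holds into the active message and
 * reprogram the RX watermark for the bytes that are still expected
 * (capped at the FIFO depth).
 */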
static void xiic_read_rx(struct xiic_i2c *i2c)
{
	u8 bytes_in_fifo;
	int i;

	bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;

	dev_dbg(i2c->adap.dev.parent,
		"%s entry, bytes in fifo: %d, msg: %d, SR: 0x%x, CR: 0x%x\n",
		__func__, bytes_in_fifo, xiic_rx_space(i2c),
		xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
		xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));

	if (bytes_in_fifo > xiic_rx_space(i2c))
		bytes_in_fifo = xiic_rx_space(i2c);

	for (i = 0; i < bytes_in_fifo; i++)
		i2c->rx_msg->buf[i2c->rx_pos++] =
			xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);

	xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET,
		(xiic_rx_space(i2c) > IIC_RX_FIFO_DEPTH) ?
		IIC_RX_FIFO_DEPTH - 1 : xiic_rx_space(i2c) - 1);
}

static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
{
	/* return the actual space left in the FIFO */
	return IIC_TX_FIFO_DEPTH - xiic_getreg8(i2c, XIIC_TFO_REG_OFFSET) - 1;
}

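/*
 * Move as much of the current message as fits into the TX FIFO; the last
 * byte of the final message is tagged with a dynamic STOP so the core
 * terminates the transfer on the bus.
 */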
static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
{
	u8 fifo_space = xiic_tx_fifo_space(i2c);
	int len = xiic_tx_space(i2c);

	len = (len > fifo_space) ? fifo_space : len;

	dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
		__func__, len, fifo_space);

	while (len--) {
		u16 data = i2c->tx_msg->buf[i2c->tx_pos++];

		if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) {
			/* last message in transfer -> STOP */
			data |= XIIC_TX_DYN_STOP_MASK;
			dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
		}
		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
	}
}

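/*
 * End the current transfer: drop the message pointers, record the final
 * state and wake the caller sleeping in xiic_xfer().
 */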
static void xiic_wakeup(struct xiic_i2c *i2c, int code)
{
	i2c->tx_msg = NULL;
	i2c->rx_msg = NULL;
	i2c->nmsgs = 0;
	i2c->state = code;
	wake_up(&i2c->wait);
}

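/*
 * Threaded interrupt handler: services arbitration-lost/TX-error, RX-full,
 * bus-not-busy and TX-empty events under i2c->lock and acknowledges the
 * handled interrupt sources before returning.
 */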
static irqreturn_t xiic_process(int irq, void *dev_id)
{
	struct xiic_i2c *i2c = dev_id;
	u32 pend, isr, ier;
	u32 clr = 0;

	/* Get the interrupt Status from the IPIF. There is no clearing of
	 * interrupts in the IPIF. Interrupts must be cleared at the source.
	 * To find which interrupts are pending, AND the pending interrupts
	 * with those that are enabled.
	 */
	mutex_lock(&i2c->lock);
	isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
	ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
	pend = isr & ier;

	dev_dbg(i2c->adap.dev.parent, "%s: IER: 0x%x, ISR: 0x%x, pend: 0x%x\n",
		__func__, ier, isr, pend);
	dev_dbg(i2c->adap.dev.parent, "%s: SR: 0x%x, msg: %p, nmsgs: %d\n",
		__func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
		i2c->tx_msg, i2c->nmsgs);

	/* Service requesting interrupt */
	if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
		((pend & XIIC_INTR_TX_ERROR_MASK) &&
		!(pend & XIIC_INTR_RX_FULL_MASK))) {
		/* bus arbitration lost, or...
		 * Transmit error _OR_ RX completed
		 * if this happens when RX_FULL is not set
		 * this is probably a TX error
		 */

		dev_dbg(i2c->adap.dev.parent, "%s error\n", __func__);

		/* dynamic mode seems to suffer from problems if we just flush
		 * the fifos and the next message is a TX with len 0 (only addr),
		 * so reset the IP instead of just flushing the fifos
		 */
		xiic_reinit(i2c);

		if (i2c->rx_msg)
			xiic_wakeup(i2c, STATE_ERROR);
		if (i2c->tx_msg)
			xiic_wakeup(i2c, STATE_ERROR);
	}
	if (pend & XIIC_INTR_RX_FULL_MASK) {
		/* Receive register/FIFO is full */

		clr |= XIIC_INTR_RX_FULL_MASK;
		if (!i2c->rx_msg) {
			dev_dbg(i2c->adap.dev.parent,
				"%s unexpected RX IRQ\n", __func__);
			xiic_clear_rx_fifo(i2c);
			goto out;
		}

		xiic_read_rx(i2c);
		if (xiic_rx_space(i2c) == 0) {
			/* this is the last part of the message */
			i2c->rx_msg = NULL;

			/* also clear TX error if there (RX complete) */
			clr |= (isr & XIIC_INTR_TX_ERROR_MASK);

			dev_dbg(i2c->adap.dev.parent,
				"%s end of message, nmsgs: %d\n",
				__func__, i2c->nmsgs);

			/* send next message if this wasn't the last,
			 * otherwise the transfer will be finalised when
			 * receiving the bus not busy interrupt
			 */
			if (i2c->nmsgs > 1) {
				i2c->nmsgs--;
				i2c->tx_msg++;
				dev_dbg(i2c->adap.dev.parent,
					"%s will start next...\n", __func__);

				__xiic_start_xfer(i2c);
			}
		}
	}
	if (pend & XIIC_INTR_BNB_MASK) {
		/* IIC bus has transitioned to not busy */
		clr |= XIIC_INTR_BNB_MASK;

		/* The bus is not busy, disable BusNotBusy interrupt */
		xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);

		if (!i2c->tx_msg)
			goto out;

		if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
			xiic_tx_space(i2c) == 0)
			xiic_wakeup(i2c, STATE_DONE);
		else
			xiic_wakeup(i2c, STATE_ERROR);
	}
	if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
		/* Transmit register/FIFO is empty or ½ empty */

		clr |= (pend &
			(XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK));

		if (!i2c->tx_msg) {
			dev_dbg(i2c->adap.dev.parent,
				"%s unexpected TX IRQ\n", __func__);
			goto out;
		}

		xiic_fill_tx_fifo(i2c);

		/* current message sent and there is space in the fifo */
		if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
			dev_dbg(i2c->adap.dev.parent,
				"%s end of message sent, nmsgs: %d\n",
				__func__, i2c->nmsgs);
			if (i2c->nmsgs > 1) {
				i2c->nmsgs--;
				i2c->tx_msg++;
				__xiic_start_xfer(i2c);
			} else {
				xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);

				dev_dbg(i2c->adap.dev.parent,
					"%s Got TX IRQ but no more to do...\n",
					__func__);
			}
		} else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
			/* current frame is sent and is last,
			 * make sure to disable tx half
			 */
			xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
	}
out:
	dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);

	xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
	mutex_unlock(&i2c->lock);
	return IRQ_HANDLED;
}

static int xiic_bus_busy(struct xiic_i2c *i2c)
{
	u8 sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);

	return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
}

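/*
 * Check whether the adapter can accept a new transfer. If the bus is still
 * busy (e.g. a previous transfer ended with a TX error), poll it for a few
 * milliseconds before giving up.
 */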
static int xiic_busy(struct xiic_i2c *i2c)
{
	int tries = 3;
	int err;

	if (i2c->tx_msg)
		return -EBUSY;

	/* for instance if previous transfer was terminated due to TX error
	 * it might be that the bus is on its way to become available
	 * give it at most 3 ms to wake
	 */
	err = xiic_bus_busy(i2c);
	while (err && tries--) {
		msleep(1);
		err = xiic_bus_busy(i2c);
	}

	return err;
}

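/*
 * Program a dynamic-mode read: set the RX watermark, push the slave
 * address (with dynamic START) and the byte count (with dynamic STOP if
 * this is the last message) into the TX FIFO, and arm the RX-full,
 * TX-error and bus-not-busy interrupts.
 */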
static void xiic_start_recv(struct xiic_i2c *i2c)
{
	u8 rx_watermark;
	struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
	unsigned long flags;

	/* Clear and enable Rx full interrupt. */
	xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);

	/* we want to get all but the last byte, because the TX_ERROR IRQ is
	 * used to indicate an error ACK on the address and the negative ack
	 * on the last received byte, so to not mix them up receive all but
	 * the last byte. In the case where there is only one byte to receive
	 * we can check if ERROR and RX full are set at the same time
	 */
	rx_watermark = msg->len;
	if (rx_watermark > IIC_RX_FIFO_DEPTH)
		rx_watermark = IIC_RX_FIFO_DEPTH;
	xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);

	local_irq_save(flags);
	if (!(msg->flags & I2C_M_NOSTART))
		/* write the address */
		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
			i2c_8bit_addr_from_msg(msg) | XIIC_TX_DYN_START_MASK);

	xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);

	xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
		msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
	local_irq_restore(flags);

	if (i2c->nmsgs == 1)
		/* very last, enable bus not busy as well */
		xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);

	/* the message is transmitted as far as the TX side is concerned */
	i2c->tx_pos = msg->len;
}

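/*
 * Program a dynamic-mode write: push the slave address (with dynamic
 * START, and dynamic STOP for an address-only final message), fill the TX
 * FIFO and enable the TX-empty, TX-error and bus-not-busy interrupts.
 */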
static void xiic_start_send(struct xiic_i2c *i2c)
{
	struct i2c_msg *msg = i2c->tx_msg;

	xiic_irq_clr(i2c, XIIC_INTR_TX_ERROR_MASK);

	dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d\n",
		__func__, msg, msg->len);
	dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
		__func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
		xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));

	if (!(msg->flags & I2C_M_NOSTART)) {
		/* write the address */
		u16 data = i2c_8bit_addr_from_msg(msg) |
			XIIC_TX_DYN_START_MASK;

		if ((i2c->nmsgs == 1) && msg->len == 0)
			/* no data and last message -> add STOP */
			data |= XIIC_TX_DYN_STOP_MASK;

		xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
	}

	xiic_fill_tx_fifo(i2c);

	/* Clear any pending Tx empty, Tx Error and then enable them. */
	xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_ERROR_MASK |
		XIIC_INTR_BNB_MASK);
}

static irqreturn_t xiic_isr(int irq, void *dev_id)
{
	struct xiic_i2c *i2c = dev_id;
	u32 pend, isr, ier;
	irqreturn_t ret = IRQ_NONE;

	/* Do not process a device's interrupts if the device has no
	 * interrupts pending
	 */
	dev_dbg(i2c->adap.dev.parent, "%s entry\n", __func__);

	isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
	ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
	pend = isr & ier;
	if (pend)
		ret = IRQ_WAKE_THREAD;

	return ret;
}

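/*
 * Start (or continue) the transfer described by i2c->tx_msg/nmsgs, queuing
 * as many consecutive messages as the TX FIFO allows. Called with
 * i2c->lock held.
 */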
static void __xiic_start_xfer(struct xiic_i2c *i2c)
{
	int first = 1;
	int fifo_space = xiic_tx_fifo_space(i2c);

	dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifo space: %d\n",
		__func__, i2c->tx_msg, fifo_space);

	if (!i2c->tx_msg)
		return;

	i2c->rx_pos = 0;
	i2c->tx_pos = 0;
	i2c->state = STATE_START;
	while ((fifo_space >= 2) && (first || (i2c->nmsgs > 1))) {
		if (!first) {
			i2c->nmsgs--;
			i2c->tx_msg++;
			i2c->tx_pos = 0;
		} else
			first = 0;

		if (i2c->tx_msg->flags & I2C_M_RD) {
			/* we don't dare to put several reads in the FIFO */
			xiic_start_recv(i2c);
			return;
		} else {
			xiic_start_send(i2c);
			if (xiic_tx_space(i2c) != 0) {
				/* the message could not be completely sent */
				break;
			}
		}

		fifo_space = xiic_tx_fifo_space(i2c);
	}

	/* there are more messages or the current one could not be completely
	 * put into the FIFO, also enable the half empty interrupt
	 */
	if (i2c->nmsgs > 1 || xiic_tx_space(i2c))
		xiic_irq_clr_en(i2c, XIIC_INTR_TX_HALF_MASK);
}

static void xiic_start_xfer(struct xiic_i2c *i2c)
{
	mutex_lock(&i2c->lock);
	xiic_reinit(i2c);
	__xiic_start_xfer(i2c);
	mutex_unlock(&i2c->lock);
}

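/*
 * master_xfer callback: hand the message list to the interrupt-driven
 * state machine and sleep until it reports completion, an error, or a
 * one-second timeout.
 */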
static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct xiic_i2c *i2c = i2c_get_adapdata(adap);
	int err;

	dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
		xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));

	err = pm_runtime_get_sync(i2c->dev);
	if (err < 0)
		return err;

	err = xiic_busy(i2c);
	if (err)
		goto out;

	i2c->tx_msg = msgs;
	i2c->nmsgs = num;

	xiic_start_xfer(i2c);

	if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
		(i2c->state == STATE_DONE), HZ)) {
		err = (i2c->state == STATE_DONE) ? num : -EIO;
		goto out;
	} else {
		i2c->tx_msg = NULL;
		i2c->rx_msg = NULL;
		i2c->nmsgs = 0;
		err = -ETIMEDOUT;
		goto out;
	}
out:
	pm_runtime_mark_last_busy(i2c->dev);
	pm_runtime_put_autosuspend(i2c->dev);
	return err;
}
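
/*
 * Illustrative sketch only (not part of this driver): a client device on
 * this bus reaches xiic_xfer() through the I2C core. A typical register
 * read from a hypothetical slave driver, where "client", "reg" and "val"
 * are placeholders, would look like:
 *
 *	u8 reg = 0x01;
 *	u8 val[2];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
 *		{ .addr = client->addr, .flags = I2C_M_RD, .len = 2, .buf = val },
 *	};
 *	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
 *
 * Note that the adapter quirks below cap a single read at 255 bytes.
 */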

static u32 xiic_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm xiic_algorithm = {
	.master_xfer = xiic_xfer,
	.functionality = xiic_func,
};

static const struct i2c_adapter_quirks xiic_quirks = {
	.max_read_len = 255,
};

static const struct i2c_adapter xiic_adapter = {
	.owner = THIS_MODULE,
	.name = DRIVER_NAME,
	.class = I2C_CLASS_DEPRECATED,
	.algo = &xiic_algorithm,
	.quirks = &xiic_quirks,
};

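/*
 * Map the registers, set up the clock and runtime PM, register the
 * threaded interrupt handler, detect the bus endianness and finally
 * register the adapter with the I2C core.
 */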
static int xiic_i2c_probe(struct platform_device *pdev)
{
	struct xiic_i2c *i2c;
	struct xiic_i2c_platform_data *pdata;
	struct resource *res;
	int ret, irq;
	u8 i;
	u32 sr;

	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	i2c->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(i2c->base))
		return PTR_ERR(i2c->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	pdata = dev_get_platdata(&pdev->dev);

	/* hook up driver to tree */
	platform_set_drvdata(pdev, i2c);
	i2c->adap = xiic_adapter;
	i2c_set_adapdata(&i2c->adap, i2c);
	i2c->adap.dev.parent = &pdev->dev;
	i2c->adap.dev.of_node = pdev->dev.of_node;

	mutex_init(&i2c->lock);
	init_waitqueue_head(&i2c->wait);

	i2c->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(i2c->clk)) {
		dev_err(&pdev->dev, "input clock not found.\n");
		return PTR_ERR(i2c->clk);
	}

	ret = clk_prepare_enable(i2c->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock.\n");
		return ret;
	}

	i2c->dev = &pdev->dev;
	pm_runtime_enable(i2c->dev);
	pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT);
	pm_runtime_use_autosuspend(i2c->dev);
	pm_runtime_set_active(i2c->dev);

	ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
					xiic_process, IRQF_ONESHOT,
					pdev->name, i2c);
	if (ret < 0) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		goto err_clk_dis;
	}

	/*
	 * Detect endianness.
	 * Try to reset the TX FIFO. Then check the EMPTY flag. If it is not
	 * set, assume that the endianness was wrong and swap.
	 */
	i2c->endianness = LITTLE;
	xiic_setreg32(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);
	/* Reset is cleared in xiic_reinit */
	sr = xiic_getreg32(i2c, XIIC_SR_REG_OFFSET);
	if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK))
		i2c->endianness = BIG;

	xiic_reinit(i2c);

	/* add i2c adapter to i2c tree */
	ret = i2c_add_adapter(&i2c->adap);
	if (ret) {
		xiic_deinit(i2c);
		goto err_clk_dis;
	}

	if (pdata) {
		/* add in known devices to the bus */
		for (i = 0; i < pdata->num_devices; i++)
			i2c_new_device(&i2c->adap, pdata->devices + i);
	}

	return 0;

err_clk_dis:
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(i2c->clk);
	return ret;
}

static int xiic_i2c_remove(struct platform_device *pdev)
{
	struct xiic_i2c *i2c = platform_get_drvdata(pdev);
	int ret;

	/* remove adapter & data */
	i2c_del_adapter(&i2c->adap);

	ret = clk_prepare_enable(i2c->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock.\n");
		return ret;
	}

	xiic_deinit(i2c);
	clk_disable_unprepare(i2c->clk);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

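/*
 * Illustrative sketch only: a device tree node that would bind against the
 * compatible string below. The unit address, interrupt specifier and clock
 * phandle are placeholders and depend on the actual FPGA design:
 *
 *	axi_iic_0: i2c@40800000 {
 *		compatible = "xlnx,xps-iic-2.00.a";
 *		reg = <0x40800000 0x10000>;
 *		interrupt-parent = <&intc>;
 *		interrupts = <2>;
 *		clocks = <&clkc 15>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */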
#if defined(CONFIG_OF)
static const struct of_device_id xiic_of_match[] = {
	{ .compatible = "xlnx,xps-iic-2.00.a", },
	{},
};
MODULE_DEVICE_TABLE(of, xiic_of_match);
#endif

static int __maybe_unused xiic_i2c_runtime_suspend(struct device *dev)
{
	struct xiic_i2c *i2c = dev_get_drvdata(dev);

	clk_disable(i2c->clk);

	return 0;
}

static int __maybe_unused xiic_i2c_runtime_resume(struct device *dev)
{
	struct xiic_i2c *i2c = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(i2c->clk);
	if (ret) {
		dev_err(dev, "Cannot enable clock.\n");
		return ret;
	}

	return 0;
}

static const struct dev_pm_ops xiic_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend,
			   xiic_i2c_runtime_resume, NULL)
};

static struct platform_driver xiic_i2c_driver = {
	.probe   = xiic_i2c_probe,
	.remove  = xiic_i2c_remove,
	.driver  = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(xiic_of_match),
		.pm = &xiic_dev_pm_ops,
	},
};

module_platform_driver(xiic_i2c_driver);

MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:"DRIVER_NAME);