1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Silvaco dual-role I3C master driver
4 *
5 * Copyright (C) 2020 Silvaco
6 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG 0x000
26 #define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
36
37 #define SVC_I3C_MCTRL 0x084
38 #define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
39 #define SVC_I3C_MCTRL_REQUEST_NONE 0
40 #define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
41 #define SVC_I3C_MCTRL_REQUEST_STOP 2
42 #define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
43 #define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
44 #define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
45 #define SVC_I3C_MCTRL_TYPE_I3C 0
46 #define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
47 #define SVC_I3C_MCTRL_IBIRESP_AUTO 0
48 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
49 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
50 #define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
51 #define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
52 #define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
53 #define SVC_I3C_MCTRL_DIR_WRITE 0
54 #define SVC_I3C_MCTRL_DIR_READ 1
55 #define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
56 #define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
57
58 #define SVC_I3C_MSTATUS 0x088
59 #define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
60 #define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
61 #define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
62 #define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
63 #define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
64 #define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
65 #define SVC_I3C_MSTATUS_IBITYPE_IBI 1
66 #define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
67 #define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
68 #define SVC_I3C_MINT_SLVSTART BIT(8)
69 #define SVC_I3C_MINT_MCTRLDONE BIT(9)
70 #define SVC_I3C_MINT_COMPLETE BIT(10)
71 #define SVC_I3C_MINT_RXPEND BIT(11)
72 #define SVC_I3C_MINT_TXNOTFULL BIT(12)
73 #define SVC_I3C_MINT_IBIWON BIT(13)
74 #define SVC_I3C_MINT_ERRWARN BIT(15)
75 #define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
76 #define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
77 #define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
78 #define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
79 #define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
80 #define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
81 #define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
82 #define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
83
84 #define SVC_I3C_IBIRULES 0x08C
85 #define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
86 ((addr) & 0x3F) << ((slot) * 6))
87 #define SVC_I3C_IBIRULES_ADDRS 5
88 #define SVC_I3C_IBIRULES_MSB0 BIT(30)
89 #define SVC_I3C_IBIRULES_NOBYTE BIT(31)
90 #define SVC_I3C_IBIRULES_MANDBYTE 0
91 #define SVC_I3C_MINTSET 0x090
92 #define SVC_I3C_MINTCLR 0x094
93 #define SVC_I3C_MINTMASKED 0x098
94 #define SVC_I3C_MERRWARN 0x09C
95 #define SVC_I3C_MERRWARN_NACK BIT(2)
96 #define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
97 #define SVC_I3C_MDMACTRL 0x0A0
98 #define SVC_I3C_MDATACTRL 0x0AC
99 #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
100 #define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
101 #define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
102 #define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
103 #define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
104 #define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
105 #define SVC_I3C_MDATACTRL_TXFULL BIT(30)
106 #define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
107
108 #define SVC_I3C_MWDATAB 0x0B0
109 #define SVC_I3C_MWDATAB_END BIT(8)
110
111 #define SVC_I3C_MWDATABE 0x0B4
112 #define SVC_I3C_MWDATAH 0x0B8
113 #define SVC_I3C_MWDATAHE 0x0BC
114 #define SVC_I3C_MRDATAB 0x0C0
115 #define SVC_I3C_MRDATAH 0x0C8
116 #define SVC_I3C_MWMSG_SDR 0x0D0
117 #define SVC_I3C_MRMSG_SDR 0x0D4
118 #define SVC_I3C_MWMSG_DDR 0x0D8
119 #define SVC_I3C_MRMSG_DDR 0x0DC
120
121 #define SVC_I3C_MDYNADDR 0x0E4
122 #define SVC_MDYNADDR_VALID BIT(0)
123 #define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
124
125 #define SVC_I3C_MAX_DEVS 32
126 #define SVC_I3C_PM_TIMEOUT_MS 1000
127
128 /* This parameter depends on the implementation and may be tuned */
129 #define SVC_I3C_FIFO_SIZE 16
130
131 #define SVC_I3C_EVENT_IBI BIT(0)
132 #define SVC_I3C_EVENT_HOTJOIN BIT(1)
133
/**
 * struct svc_i3c_cmd - One command of an I3C/I2C transfer
 * @addr: Target address on the bus
 * @rnw: True for a read command, false for a write (presumably
 *       "read-not-write" - confirm against the transfer path)
 * @in: Receive buffer, used when @rnw is true
 * @out: Transmit buffer, used when @rnw is false
 * @len: Requested transfer length in bytes
 * @actual_len: Number of bytes actually transferred
 * @xfer: Private I3C transfer this command was built from, if any
 * @continued: True when another command follows as part of the same
 *             transaction (presumably without an intermediate STOP -
 *             TODO confirm in the xfer path)
 */
struct svc_i3c_cmd {
	u8 addr;
	bool rnw;
	u8 *in;
	const void *out;
	unsigned int len;
	unsigned int actual_len;
	struct i3c_priv_xfer *xfer;
	bool continued;
};
144
/**
 * struct svc_i3c_xfer - I3C transfer descriptor
 * @node: Entry in the master's xferqueue list
 * @comp: Completion used to wait for the end of the transfer
 * @ret: Transfer status (0 on success, negative errno otherwise)
 * @type: Transfer type (presumably one of the SVC_I3C_MCTRL_TYPE_*
 *        values - confirm in the enqueue path, not visible here)
 * @ncmds: Number of entries in @cmds
 * @cmds: Commands composing the transfer
 */
struct svc_i3c_xfer {
	struct list_head node;
	struct completion comp;
	int ret;
	unsigned int type;
	unsigned int ncmds;
	struct svc_i3c_cmd cmds[] __counted_by(ncmds);
};
153
/**
 * struct svc_i3c_regs_save - Register values preserved for PM operations
 * @mconfig: Saved MCONFIG register value
 * @mdynaddr: Saved MDYNADDR register value
 */
struct svc_i3c_regs_save {
	u32 mconfig;
	u32 mdynaddr;
};
158
/**
 * struct svc_i3c_master - Silvaco I3C Master structure
 * @base: I3C master controller
 * @dev: Corresponding device
 * @regs: Memory mapping
 * @saved_regs: Volatile values for PM operations
 * @free_slots: Bit array of available slots
 * @addrs: Array containing the dynamic addresses of each attached device
 * @descs: Array of descriptors, one per attached device
 * @hj_work: Hot-join work
 * @ibi_work: IBI work
 * @irq: Main interrupt
 * @pclk: System clock
 * @fclk: Fast clock (bus)
 * @sclk: Slow clock (other events)
 * @xferqueue: Transfer queue structure
 * @xferqueue.list: List member
 * @xferqueue.cur: Current ongoing transfer
 * @xferqueue.lock: Queue lock
 * @ibi: IBI structure
 * @ibi.num_slots: Number of slots available in @ibi.slots
 * @ibi.slots: Available IBI slots
 * @ibi.tbq_slot: To be queued IBI slot
 * @ibi.lock: IBI lock
 * @lock: Transfer lock, protect between IBI work thread and callbacks from master
 * @enabled_events: Bit mask of enabled events (IBI, HotJoin), see SVC_I3C_EVENT_*.
 */
struct svc_i3c_master {
	struct i3c_master_controller base;
	struct device *dev;
	void __iomem *regs;
	struct svc_i3c_regs_save saved_regs;
	u32 free_slots;
	u8 addrs[SVC_I3C_MAX_DEVS];
	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
	struct work_struct hj_work;
	struct work_struct ibi_work;
	int irq;
	struct clk *pclk;
	struct clk *fclk;
	struct clk *sclk;
	struct {
		struct list_head list;
		struct svc_i3c_xfer *cur;
		/* Prevent races between transfers */
		spinlock_t lock;
	} xferqueue;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		struct i3c_ibi_slot *tbq_slot;
		/* Prevent races within IBI handlers */
		spinlock_t lock;
	} ibi;
	struct mutex lock;
	int enabled_events;
};
216
/**
 * struct svc_i3c_i2c_dev_data - Device specific data
 * @index: Index in the master tables corresponding to this device
 * @ibi: IBI slot index in the master structure (initialized to -1 at
 *       attach time, i.e. no IBI slot assigned yet)
 * @ibi_pool: IBI pool associated to this device
 */
struct svc_i3c_i2c_dev_data {
	u8 index;
	int ibi;
	struct i3c_generic_ibi_pool *ibi_pool;
};
228
is_events_enabled(struct svc_i3c_master * master,u32 mask)229 static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
230 {
231 return !!(master->enabled_events & mask);
232 }
233
/*
 * svc_i3c_master_error() - check and acknowledge the controller error state.
 *
 * Reads MSTATUS and, when the ERRWARN flag is raised, consumes MERRWARN by
 * writing the read value back (same acknowledge pattern as
 * svc_i3c_master_clear_merrwarn()). Timeout conditions are only logged as
 * warnings and not considered fatal.
 *
 * Return: true when a real (non-timeout) error condition was latched.
 */
static bool svc_i3c_master_error(struct svc_i3c_master *master)
{
	u32 mstatus, merrwarn;

	mstatus = readl(master->regs + SVC_I3C_MSTATUS);
	if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
		merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
		/* Write the flags back to acknowledge them */
		writel(merrwarn, master->regs + SVC_I3C_MERRWARN);

		/* Ignore timeout error */
		if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
			dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
				mstatus, merrwarn);
			return false;
		}

		dev_err(master->dev,
			"Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
			mstatus, merrwarn);

		return true;
	}

	return false;
}
259
/* Enable the interrupt sources selected by @irqs. */
static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 irqs)
{
	writel(irqs, master->regs + SVC_I3C_MINTSET);
}
264
svc_i3c_master_disable_interrupts(struct svc_i3c_master * master)265 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
266 {
267 u32 mask = readl(master->regs + SVC_I3C_MINTSET);
268
269 writel(mask, master->regs + SVC_I3C_MINTCLR);
270 }
271
svc_i3c_master_clear_merrwarn(struct svc_i3c_master * master)272 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
273 {
274 /* Clear pending warnings */
275 writel(readl(master->regs + SVC_I3C_MERRWARN),
276 master->regs + SVC_I3C_MERRWARN);
277 }
278
svc_i3c_master_flush_fifo(struct svc_i3c_master * master)279 static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
280 {
281 /* Flush FIFOs */
282 writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
283 master->regs + SVC_I3C_MDATACTRL);
284 }
285
svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master * master)286 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
287 {
288 u32 reg;
289
290 /* Set RX and TX tigger levels, flush FIFOs */
291 reg = SVC_I3C_MDATACTRL_FLUSHTB |
292 SVC_I3C_MDATACTRL_FLUSHRB |
293 SVC_I3C_MDATACTRL_UNLOCK_TRIG |
294 SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
295 SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
296 writel(reg, master->regs + SVC_I3C_MDATACTRL);
297 }
298
/*
 * Bring the controller back to a clean state: acknowledge pending
 * errors/warnings, flush the FIFOs and restore trigger levels, then
 * mask every interrupt source.
 */
static void svc_i3c_master_reset(struct svc_i3c_master *master)
{
	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_reset_fifo_trigger(master);
	svc_i3c_master_disable_interrupts(master);
}
305
/* Convert a generic controller pointer back to its driver wrapper. */
static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct svc_i3c_master, base);
}
311
svc_i3c_master_hj_work(struct work_struct * work)312 static void svc_i3c_master_hj_work(struct work_struct *work)
313 {
314 struct svc_i3c_master *master;
315
316 master = container_of(work, struct svc_i3c_master, hj_work);
317 i3c_master_do_daa(&master->base);
318 }
319
320 static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master * master,unsigned int ibiaddr)321 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
322 unsigned int ibiaddr)
323 {
324 int i;
325
326 for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
327 if (master->addrs[i] == ibiaddr)
328 break;
329
330 if (i == SVC_I3C_MAX_DEVS)
331 return NULL;
332
333 return master->descs[i];
334 }
335
/* Request a STOP condition on the bus and let it settle. */
static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);

	/*
	 * This delay is necessary after the emission of a stop, otherwise eg.
	 * repeating IBIs do not get detected. There is a note in the manual
	 * about it, stating that the stop condition might not be settled
	 * correctly if a start condition follows too rapidly.
	 */
	udelay(1);
}
348
svc_i3c_master_handle_ibi(struct svc_i3c_master * master,struct i3c_dev_desc * dev)349 static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
350 struct i3c_dev_desc *dev)
351 {
352 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
353 struct i3c_ibi_slot *slot;
354 unsigned int count;
355 u32 mdatactrl;
356 int ret, val;
357 u8 *buf;
358
359 slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
360 if (!slot)
361 return -ENOSPC;
362
363 slot->len = 0;
364 buf = slot->data;
365
366 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
367 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
368 if (ret) {
369 dev_err(master->dev, "Timeout when polling for COMPLETE\n");
370 return ret;
371 }
372
373 while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
374 slot->len < SVC_I3C_FIFO_SIZE) {
375 mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
376 count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
377 readsl(master->regs + SVC_I3C_MRDATAB, buf, count);
378 slot->len += count;
379 buf += count;
380 }
381
382 master->ibi.tbq_slot = slot;
383
384 return 0;
385 }
386
/*
 * ACK an in-band interrupt, optionally letting the controller fetch the
 * mandatory data byte that follows the IBI address.
 */
static void svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
				   bool mandatory_byte)
{
	unsigned int request = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;

	request |= mandatory_byte ? SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE :
				    SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;

	writel(request, master->regs + SVC_I3C_MCTRL);
}
400
svc_i3c_master_nack_ibi(struct svc_i3c_master * master)401 static void svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
402 {
403 writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
404 SVC_I3C_MCTRL_IBIRESP_NACK,
405 master->regs + SVC_I3C_MCTRL);
406 }
407
/*
 * svc_i3c_master_ibi_work() - workqueue handler for slave start requests.
 *
 * Runs after the IRQ handler observed SLVSTART and masked interrupts.
 * Uses the AUTOIBI mechanism to win the arbitration, then ACKs or NACKs
 * the request depending on its type and on which events are currently
 * enabled. Serialized against regular transfers through master->lock.
 */
static void svc_i3c_master_ibi_work(struct work_struct *work)
{
	struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
	struct svc_i3c_i2c_dev_data *data;
	unsigned int ibitype, ibiaddr;
	struct i3c_dev_desc *dev;
	u32 status, val;
	int ret;

	mutex_lock(&master->lock);
	/*
	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
	 * ibitype will be 0 since it was last updated only after the 8th SCL
	 * cycle, leading to missed client IBI handlers.
	 *
	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
	 * at svc_i3c_master_priv_xfers().
	 *
	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
	 */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
	writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
	       SVC_I3C_MCTRL_IBIRESP_AUTO,
	       master->regs + SVC_I3C_MCTRL);

	/* Wait for IBIWON, should take approximately 100us */
	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
					 SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for IBIWON\n");
		svc_i3c_master_emit_stop(master);
		goto reenable_ibis;
	}

	/* The arbitration is won: the IBI type/address fields are now valid */
	status = readl(master->regs + SVC_I3C_MSTATUS);
	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);

	/* Handle the critical responses to IBI's */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		/*
		 * NOTE(review): dev is only assigned in this branch; the
		 * error path below dereferences it only when tbq_slot was
		 * set, which happens solely via svc_i3c_master_handle_ibi()
		 * on this same branch - confirm no other path sets tbq_slot.
		 */
		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
			svc_i3c_master_nack_ibi(master);
		else
			svc_i3c_master_handle_ibi(master, dev);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			svc_i3c_master_ack_ibi(master, false);
		else
			svc_i3c_master_nack_ibi(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		/* Mastership requests are always refused by this driver */
		svc_i3c_master_nack_ibi(master);
		break;
	default:
		break;
	}

	/*
	 * If an error happened, we probably got interrupted and the exchange
	 * timedout. In this case we just drop everything, emit a stop and wait
	 * for the slave to interrupt again.
	 */
	if (svc_i3c_master_error(master)) {
		if (master->ibi.tbq_slot) {
			data = i3c_dev_get_master_data(dev);
			i3c_generic_ibi_recycle_slot(data->ibi_pool,
						     master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}

		svc_i3c_master_emit_stop(master);

		goto reenable_ibis;
	}

	/* Handle the non critical tasks */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		if (dev) {
			/* Hand the collected payload over to the client */
			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}
		svc_i3c_master_emit_stop(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		svc_i3c_master_emit_stop(master);
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			queue_work(master->base.wq, &master->hj_work);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
	default:
		break;
	}

reenable_ibis:
	/* Re-arm the SLVSTART interrupt that the IRQ handler masked */
	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
	mutex_unlock(&master->lock);
}
512
svc_i3c_master_irq_handler(int irq,void * dev_id)513 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
514 {
515 struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
516 u32 active = readl(master->regs + SVC_I3C_MSTATUS);
517
518 if (!SVC_I3C_MSTATUS_SLVSTART(active))
519 return IRQ_NONE;
520
521 /* Clear the interrupt status */
522 writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
523
524 svc_i3c_master_disable_interrupts(master);
525
526 /* Handle the interrupt in a non atomic context */
527 queue_work(master->base.wq, &master->ibi_work);
528
529 return IRQ_HANDLED;
530 }
531
svc_i3c_master_bus_init(struct i3c_master_controller * m)532 static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
533 {
534 struct svc_i3c_master *master = to_svc_i3c_master(m);
535 struct i3c_bus *bus = i3c_master_get_bus(m);
536 struct i3c_device_info info = {};
537 unsigned long fclk_rate, fclk_period_ns;
538 unsigned int high_period_ns, od_low_period_ns;
539 u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
540 int ret;
541
542 ret = pm_runtime_resume_and_get(master->dev);
543 if (ret < 0) {
544 dev_err(master->dev,
545 "<%s> cannot resume i3c bus master, err: %d\n",
546 __func__, ret);
547 return ret;
548 }
549
550 /* Timings derivation */
551 fclk_rate = clk_get_rate(master->fclk);
552 if (!fclk_rate) {
553 ret = -EINVAL;
554 goto rpm_out;
555 }
556
557 fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
558
559 /*
560 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
561 * Simplest configuration is using a 50% duty-cycle of 40ns.
562 */
563 ppbaud = DIV_ROUND_UP(40, fclk_period_ns) - 1;
564 pplow = 0;
565
566 /*
567 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
568 * duty-cycle tuned so that high levels are filetered out by
569 * the 50ns filter (target being 40ns).
570 */
571 odhpp = 1;
572 high_period_ns = (ppbaud + 1) * fclk_period_ns;
573 odbaud = DIV_ROUND_UP(240 - high_period_ns, high_period_ns) - 1;
574 od_low_period_ns = (odbaud + 1) * high_period_ns;
575
576 switch (bus->mode) {
577 case I3C_BUS_MODE_PURE:
578 i2cbaud = 0;
579 odstop = 0;
580 break;
581 case I3C_BUS_MODE_MIXED_FAST:
582 case I3C_BUS_MODE_MIXED_LIMITED:
583 /*
584 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
585 * between the high and low period does not really matter.
586 */
587 i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
588 odstop = 1;
589 break;
590 case I3C_BUS_MODE_MIXED_SLOW:
591 /*
592 * Using I2C Fm mode, target is 0.4MHz/2500ns, with the same
593 * constraints as the FM+ mode.
594 */
595 i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
596 odstop = 1;
597 break;
598 default:
599 goto rpm_out;
600 }
601
602 reg = SVC_I3C_MCONFIG_MASTER_EN |
603 SVC_I3C_MCONFIG_DISTO(0) |
604 SVC_I3C_MCONFIG_HKEEP(0) |
605 SVC_I3C_MCONFIG_ODSTOP(odstop) |
606 SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
607 SVC_I3C_MCONFIG_PPLOW(pplow) |
608 SVC_I3C_MCONFIG_ODBAUD(odbaud) |
609 SVC_I3C_MCONFIG_ODHPP(odhpp) |
610 SVC_I3C_MCONFIG_SKEW(0) |
611 SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
612 writel(reg, master->regs + SVC_I3C_MCONFIG);
613
614 /* Master core's registration */
615 ret = i3c_master_get_free_addr(m, 0);
616 if (ret < 0)
617 goto rpm_out;
618
619 info.dyn_addr = ret;
620
621 writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
622 master->regs + SVC_I3C_MDYNADDR);
623
624 ret = i3c_master_set_info(&master->base, &info);
625 if (ret)
626 goto rpm_out;
627
628 rpm_out:
629 pm_runtime_mark_last_busy(master->dev);
630 pm_runtime_put_autosuspend(master->dev);
631
632 return ret;
633 }
634
svc_i3c_master_bus_cleanup(struct i3c_master_controller * m)635 static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
636 {
637 struct svc_i3c_master *master = to_svc_i3c_master(m);
638 int ret;
639
640 ret = pm_runtime_resume_and_get(master->dev);
641 if (ret < 0) {
642 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
643 return;
644 }
645
646 svc_i3c_master_disable_interrupts(master);
647
648 /* Disable master */
649 writel(0, master->regs + SVC_I3C_MCONFIG);
650
651 pm_runtime_mark_last_busy(master->dev);
652 pm_runtime_put_autosuspend(master->dev);
653 }
654
svc_i3c_master_reserve_slot(struct svc_i3c_master * master)655 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
656 {
657 unsigned int slot;
658
659 if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
660 return -ENOSPC;
661
662 slot = ffs(master->free_slots) - 1;
663
664 master->free_slots &= ~BIT(slot);
665
666 return slot;
667 }
668
/* Mark @slot as available again in the free-slot bitmap. */
static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
					unsigned int slot)
{
	master->free_slots |= BIT(slot);
}
674
svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc * dev)675 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
676 {
677 struct i3c_master_controller *m = i3c_dev_get_master(dev);
678 struct svc_i3c_master *master = to_svc_i3c_master(m);
679 struct svc_i3c_i2c_dev_data *data;
680 int slot;
681
682 slot = svc_i3c_master_reserve_slot(master);
683 if (slot < 0)
684 return slot;
685
686 data = kzalloc(sizeof(*data), GFP_KERNEL);
687 if (!data) {
688 svc_i3c_master_release_slot(master, slot);
689 return -ENOMEM;
690 }
691
692 data->ibi = -1;
693 data->index = slot;
694 master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
695 dev->info.static_addr;
696 master->descs[slot] = dev;
697
698 i3c_dev_set_master_data(dev, data);
699
700 return 0;
701 }
702
/* Refresh the recorded address after a dynamic address change. */
static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					   u8 old_dyn_addr)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	master->addrs[data->index] = dev->info.dyn_addr ?: dev->info.static_addr;

	return 0;
}
715
svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc * dev)716 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
717 {
718 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
719 struct i3c_master_controller *m = i3c_dev_get_master(dev);
720 struct svc_i3c_master *master = to_svc_i3c_master(m);
721
722 master->addrs[data->index] = 0;
723 svc_i3c_master_release_slot(master, data->index);
724
725 kfree(data);
726 }
727
svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc * dev)728 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
729 {
730 struct i3c_master_controller *m = i2c_dev_get_master(dev);
731 struct svc_i3c_master *master = to_svc_i3c_master(m);
732 struct svc_i3c_i2c_dev_data *data;
733 int slot;
734
735 slot = svc_i3c_master_reserve_slot(master);
736 if (slot < 0)
737 return slot;
738
739 data = kzalloc(sizeof(*data), GFP_KERNEL);
740 if (!data) {
741 svc_i3c_master_release_slot(master, slot);
742 return -ENOMEM;
743 }
744
745 data->index = slot;
746 master->addrs[slot] = dev->addr;
747
748 i2c_dev_set_master_data(dev, data);
749
750 return 0;
751 }
752
svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc * dev)753 static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
754 {
755 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
756 struct i3c_master_controller *m = i2c_dev_get_master(dev);
757 struct svc_i3c_master *master = to_svc_i3c_master(m);
758
759 svc_i3c_master_release_slot(master, data->index);
760
761 kfree(data);
762 }
763
/*
 * Read @len bytes from the RX FIFO into @dst, waiting up to 1ms for
 * each byte to become available.
 */
static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
				unsigned int len)
{
	unsigned int i;
	u32 reg;
	int ret;

	for (i = 0; i < len; i++) {
		/* Wait until at least one byte lands in the RX FIFO */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg),
						0, 1000);
		if (ret)
			return ret;

		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
	}

	return 0;
}
783
/*
 * svc_i3c_master_do_daa_locked() - run the hardware-assisted DAA procedure.
 * @master: The controller performing the assignment.
 * @addrs: Output array receiving the assigned dynamic addresses.
 * @count: Output, number of devices that got an address.
 *
 * Repeatedly issues PROC_DAA requests: each round either yields a device's
 * 48-bit provisioned ID (followed by BCR/DCR, which are discarded here) and
 * then an address assignment, or signals the end of the procedure. Must be
 * called with the xferqueue lock held (polling is done atomically).
 *
 * Return: 0 on success, a negative errno on timeout or protocol error.
 */
static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
					u8 *addrs, unsigned int *count)
{
	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
	unsigned int dev_nb = 0, last_addr = 0;
	u32 reg;
	int ret, i;

	while (true) {
		/* Enter/proceed with DAA */
		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
		       SVC_I3C_MCTRL_TYPE_I3C |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * Either one slave will send its ID, or the assignment process
		 * is done.
		 */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg) |
						SVC_I3C_MSTATUS_MCTRLDONE(reg),
						1, 1000);
		if (ret)
			return ret;

		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
			u8 data[6];

			/*
			 * We only care about the 48-bit provisioned ID yet to
			 * be sure a device does not nack an address twice.
			 * Otherwise, we would just need to flush the RX FIFO.
			 */
			ret = svc_i3c_master_readb(master, data, 6);
			if (ret)
				return ret;

			/* Assemble the big-endian 48-bit provisioned ID */
			for (i = 0; i < 6; i++)
				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));

			/* We do not care about the BCR and DCR yet */
			ret = svc_i3c_master_readb(master, data, 2);
			if (ret)
				return ret;
		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
				/*
				 * All devices received and acked they dynamic
				 * address, this is the natural end of the DAA
				 * procedure.
				 */
				break;
			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
				/* No I3C devices attached */
				if (dev_nb == 0)
					break;

				/*
				 * A slave device nacked the address, this is
				 * allowed only once, DAA will be stopped and
				 * then resumed. The same device is supposed to
				 * answer again immediately and shall ack the
				 * address this time.
				 */
				if (prov_id[dev_nb] == nacking_prov_id)
					return -EIO;

				dev_nb--;
				nacking_prov_id = prov_id[dev_nb];
				svc_i3c_master_emit_stop(master);

				continue;
			} else {
				/* Unexpected controller state: give up */
				return -EIO;
			}
		}

		/* Wait for the slave to be ready to receive its address */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
						SVC_I3C_MSTATUS_BETWEEN(reg),
						0, 1000);
		if (ret)
			return ret;

		/* Give the slave device a suitable dynamic address */
		ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
		if (ret < 0)
			return ret;

		addrs[dev_nb] = ret;
		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
			dev_nb, addrs[dev_nb]);

		writel(addrs[dev_nb], master->regs + SVC_I3C_MWDATAB);
		last_addr = addrs[dev_nb++];
	}

	*count = dev_nb;

	return 0;
}
892
/*
 * svc_i3c_update_ibirules() - program the hardware IBI address rules.
 *
 * The IBIRULES register can match at most SVC_I3C_IBIRULES_ADDRS device
 * addresses (all with MSb clear) and a single payload policy (all devices
 * with a mandatory byte, or all without). Both candidate register values
 * are built while walking the bus, then the first list the hardware can
 * represent is programmed.
 *
 * Return: 0 on success, -ERANGE when neither list fits the hardware.
 */
static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
{
	struct i3c_dev_desc *dev;
	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
		nobyte_addr_ko = 0;
	bool list_mbyte = false, list_nobyte = false;

	/* Create the IBIRULES register for both cases */
	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
		/* Secondary masters are not part of the IBI rules */
		if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER)
			continue;

		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
							   dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				mbyte_addr_ko++;
			else
				mbyte_addr_ok++;
		} else {
			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
							    dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				nobyte_addr_ko++;
			else
				nobyte_addr_ok++;
		}
	}

	/* Device list cannot be handled by hardware */
	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_mbyte = true;

	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_nobyte = true;

	/* No list can be properly handled, return an error */
	if (!list_mbyte && !list_nobyte)
		return -ERANGE;

	/* Pick the first list that can be handled by hardware, randomly */
	if (list_mbyte)
		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
	else
		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);

	return 0;
}
946
/*
 * svc_i3c_master_do_daa() - dynamic address assignment entry point.
 *
 * Runs the DAA procedure under the xferqueue spinlock, registers each
 * discovered device with the I3C core and reprograms the IBI rules. On
 * DAA failure the bus is stopped and pending errors are acknowledged
 * before returning.
 */
static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	u8 addrs[SVC_I3C_MAX_DEVS];
	unsigned long flags;
	unsigned int dev_nb;
	int ret, i;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
	if (ret) {
		/* Leave the bus in a clean state before bailing out */
		svc_i3c_master_emit_stop(master);
		svc_i3c_master_clear_merrwarn(master);
		goto rpm_out;
	}

	/* Register all devices who participated to the core */
	for (i = 0; i < dev_nb; i++) {
		ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
		if (ret)
			goto rpm_out;
	}

	/* Configure IBI auto-rules */
	ret = svc_i3c_update_ibirules(master);
	if (ret)
		dev_err(master->dev, "Cannot handle such a list of devices");

rpm_out:
	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
988
/*
 * svc_i3c_master_read() - drain up to @len bytes of RX data into @in.
 *
 * Loops until the COMPLETE flag is observed, draining whatever the RX FIFO
 * holds on each pass. Note the ordering: COMPLETE is sampled *before* the
 * FIFO is drained, so the iteration that sees COMPLETE still collects the
 * final bytes before the loop exits. Gives up after one second.
 *
 * Return: the number of bytes read, -ETIMEDOUT on timeout, or -EINVAL if
 * the device sent more data than @len allows.
 */
static int svc_i3c_master_read(struct svc_i3c_master *master,
			       u8 *in, unsigned int len)
{
	int offset = 0, i;
	u32 mdctrl, mstatus;
	bool completed = false;
	unsigned int count;
	unsigned long start = jiffies;

	while (!completed) {
		mstatus = readl(master->regs + SVC_I3C_MSTATUS);
		if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
			completed = true;

		if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
			dev_dbg(master->dev, "I3C read timeout\n");
			return -ETIMEDOUT;
		}

		mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
		if (offset + count > len) {
			dev_err(master->dev, "I3C receive length too long!\n");
			return -EINVAL;
		}
		for (i = 0; i < count; i++)
			in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);

		offset += count;
	}

	return offset;
}
1022
/*
 * svc_i3c_master_write() - push @len bytes from @out into the TX FIFO.
 *
 * Before each byte, waits (up to 1ms) for room in the TX FIFO. The final
 * byte is written through MWDATABE so the controller knows it terminates
 * the message.
 *
 * Returns 0 on success or a negative error code from the FIFO-space poll.
 */
static int svc_i3c_master_write(struct svc_i3c_master *master,
				const u8 *out, unsigned int len)
{
	int offset = 0, ret;
	u32 mdctrl;

	while (offset < len) {
		ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
					 mdctrl,
					 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
					 0, 1000);
		if (ret)
			return ret;

		/*
		 * The last byte to be sent over the bus must either have the
		 * "end" bit set or be written in MWDATABE.
		 */
		if (likely(offset < (len - 1)))
			writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
		else
			writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
	}

	return 0;
}
1049
/*
 * svc_i3c_master_xfer() - perform one I3C/I2C bus transaction.
 * @master: controller instance
 * @rnw: true for a read, false for a write
 * @xfer_type: SVC_I3C_MCTRL_TYPE_I3C or SVC_I3C_MCTRL_TYPE_I2C
 * @addr: target address
 * @in: receive buffer (reads)
 * @out: transmit buffer (writes)
 * @xfer_len: requested length in bytes
 * @actual_len: in: read termination count; out: bytes actually read
 * @continued: true to keep the bus (repeated start), false to emit STOP
 *
 * Every failure path emits a STOP and clears MERRWARN so the bus is left
 * in a sane state. Returns 0 on success, -ENXIO on a NACKed address,
 * -EAGAIN when an IBI won arbitration (caller must retry after the IBI is
 * serviced), or another negative error code.
 */
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
			       bool rnw, unsigned int xfer_type, u8 addr,
			       u8 *in, const u8 *out, unsigned int xfer_len,
			       unsigned int *actual_len, bool continued)
{
	u32 reg;
	int ret;

	/* clean SVC_I3C_MINT_IBIWON w1c bits */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/* Issue the START + address phase; NACK any incoming IBI for now */
	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
	       xfer_type |
	       SVC_I3C_MCTRL_IBIRESP_NACK |
	       SVC_I3C_MCTRL_DIR(rnw) |
	       SVC_I3C_MCTRL_ADDR(addr) |
	       SVC_I3C_MCTRL_RDTERM(*actual_len),
	       master->regs + SVC_I3C_MCTRL);

	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
	if (ret)
		goto emit_stop;

	/* Address was NACKed: no such target (or target not ready) */
	if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
		ret = -ENXIO;
		*actual_len = 0;
		goto emit_stop;
	}

	/*
	 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
	 * with I3C Target Address.
	 *
	 * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
	 * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
	 * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
	 * a Hot-Join Request has been made.
	 *
	 * If missed IBIWON check, the wrong data will be return. When IBIWON happen, return failure
	 * and yield the above events handler.
	 */
	if (SVC_I3C_MSTATUS_IBIWON(reg)) {
		ret = -EAGAIN;
		*actual_len = 0;
		goto emit_stop;
	}

	if (rnw)
		ret = svc_i3c_master_read(master, in, xfer_len);
	else
		ret = svc_i3c_master_write(master, out, xfer_len);
	if (ret < 0)
		goto emit_stop;

	/* svc_i3c_master_read() returns the number of bytes received */
	if (rnw)
		*actual_len = ret;

	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
	if (ret)
		goto emit_stop;

	/* COMPLETE is write-1-to-clear */
	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);

	if (!continued) {
		svc_i3c_master_emit_stop(master);

		/* Wait idle if stop is sent. */
		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
	}

	return 0;

emit_stop:
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_clear_merrwarn(master);

	return ret;
}
1131
1132 static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master * master,unsigned int ncmds)1133 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1134 {
1135 struct svc_i3c_xfer *xfer;
1136
1137 xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1138 if (!xfer)
1139 return NULL;
1140
1141 INIT_LIST_HEAD(&xfer->node);
1142 xfer->ncmds = ncmds;
1143 xfer->ret = -ETIMEDOUT;
1144
1145 return xfer;
1146 }
1147
/* Release a transfer descriptor allocated by svc_i3c_master_alloc_xfer() */
static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
{
	kfree(xfer);
}
1152
svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1153 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1154 struct svc_i3c_xfer *xfer)
1155 {
1156 if (master->xferqueue.cur == xfer)
1157 master->xferqueue.cur = NULL;
1158 else
1159 list_del_init(&xfer->node);
1160 }
1161
svc_i3c_master_dequeue_xfer(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1162 static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
1163 struct svc_i3c_xfer *xfer)
1164 {
1165 unsigned long flags;
1166
1167 spin_lock_irqsave(&master->xferqueue.lock, flags);
1168 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1169 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1170 }
1171
svc_i3c_master_start_xfer_locked(struct svc_i3c_master * master)1172 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1173 {
1174 struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1175 int ret, i;
1176
1177 if (!xfer)
1178 return;
1179
1180 svc_i3c_master_clear_merrwarn(master);
1181 svc_i3c_master_flush_fifo(master);
1182
1183 for (i = 0; i < xfer->ncmds; i++) {
1184 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1185
1186 ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1187 cmd->addr, cmd->in, cmd->out,
1188 cmd->len, &cmd->actual_len,
1189 cmd->continued);
1190 /* cmd->xfer is NULL if I2C or CCC transfer */
1191 if (cmd->xfer)
1192 cmd->xfer->actual_len = cmd->actual_len;
1193
1194 if (ret)
1195 break;
1196 }
1197
1198 xfer->ret = ret;
1199 complete(&xfer->comp);
1200
1201 if (ret < 0)
1202 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1203
1204 xfer = list_first_entry_or_null(&master->xferqueue.list,
1205 struct svc_i3c_xfer,
1206 node);
1207 if (xfer)
1208 list_del_init(&xfer->node);
1209
1210 master->xferqueue.cur = xfer;
1211 svc_i3c_master_start_xfer_locked(master);
1212 }
1213
/*
 * svc_i3c_master_enqueue_xfer() - queue a transfer and kick the queue.
 *
 * Holds a runtime PM reference for the duration of the call. When no
 * transfer is in flight, the new one is executed immediately and
 * synchronously (still under the queue lock); otherwise it is appended
 * to the pending list and run when its turn comes.
 */
static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
					struct svc_i3c_xfer *xfer)
{
	unsigned long flags;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return;
	}

	init_completion(&xfer->comp);
	spin_lock_irqsave(&master->xferqueue.lock, flags);
	if (master->xferqueue.cur) {
		list_add_tail(&xfer->node, &master->xferqueue.list);
	} else {
		master->xferqueue.cur = xfer;
		svc_i3c_master_start_xfer_locked(master);
	}
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);
}
1239
1240 static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller * master,const struct i3c_ccc_cmd * cmd)1241 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1242 const struct i3c_ccc_cmd *cmd)
1243 {
1244 /* No software support for CCC commands targeting more than one slave */
1245 return (cmd->ndests == 1);
1246 }
1247
/*
 * svc_i3c_master_send_bdcast_ccc_cmd() - emit a broadcast CCC frame.
 *
 * The CCC ID and its payload must go out as a single write, so both are
 * copied into one temporary buffer (ID first, payload after). Completion
 * is bounded by a 1s timeout; xfer->ret is pre-seeded with -ETIMEDOUT by
 * the allocator so a timed-out transfer reports the right error.
 */
static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
					      struct i3c_ccc_cmd *ccc)
{
	/* +1 for the CCC ID byte prepended to the payload */
	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
	struct svc_i3c_xfer *xfer;
	struct svc_i3c_cmd *cmd;
	u8 *buf;
	int ret;

	xfer = svc_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	buf = kmalloc(xfer_len, GFP_KERNEL);
	if (!buf) {
		svc_i3c_master_free_xfer(xfer);
		return -ENOMEM;
	}

	buf[0] = ccc->id;
	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	cmd = &xfer->cmds[0];
	cmd->addr = ccc->dests[0].addr;
	cmd->rnw = ccc->rnw;
	cmd->in = NULL;
	cmd->out = buf;
	cmd->len = xfer_len;
	cmd->actual_len = 0;
	cmd->continued = false;

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	ret = xfer->ret;
	kfree(buf);
	svc_i3c_master_free_xfer(xfer);

	return ret;
}
1293
svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master * master,struct i3c_ccc_cmd * ccc)1294 static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1295 struct i3c_ccc_cmd *ccc)
1296 {
1297 unsigned int xfer_len = ccc->dests[0].payload.len;
1298 unsigned int actual_len = ccc->rnw ? xfer_len : 0;
1299 struct svc_i3c_xfer *xfer;
1300 struct svc_i3c_cmd *cmd;
1301 int ret;
1302
1303 xfer = svc_i3c_master_alloc_xfer(master, 2);
1304 if (!xfer)
1305 return -ENOMEM;
1306
1307 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1308
1309 /* Broadcasted message */
1310 cmd = &xfer->cmds[0];
1311 cmd->addr = I3C_BROADCAST_ADDR;
1312 cmd->rnw = 0;
1313 cmd->in = NULL;
1314 cmd->out = &ccc->id;
1315 cmd->len = 1;
1316 cmd->actual_len = 0;
1317 cmd->continued = true;
1318
1319 /* Directed message */
1320 cmd = &xfer->cmds[1];
1321 cmd->addr = ccc->dests[0].addr;
1322 cmd->rnw = ccc->rnw;
1323 cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
1324 cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data,
1325 cmd->len = xfer_len;
1326 cmd->actual_len = actual_len;
1327 cmd->continued = false;
1328
1329 mutex_lock(&master->lock);
1330 svc_i3c_master_enqueue_xfer(master, xfer);
1331 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1332 svc_i3c_master_dequeue_xfer(master, xfer);
1333 mutex_unlock(&master->lock);
1334
1335 if (cmd->actual_len != xfer_len)
1336 ccc->dests[0].payload.len = cmd->actual_len;
1337
1338 ret = xfer->ret;
1339 svc_i3c_master_free_xfer(xfer);
1340
1341 return ret;
1342 }
1343
svc_i3c_master_send_ccc_cmd(struct i3c_master_controller * m,struct i3c_ccc_cmd * cmd)1344 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1345 struct i3c_ccc_cmd *cmd)
1346 {
1347 struct svc_i3c_master *master = to_svc_i3c_master(m);
1348 bool broadcast = cmd->id < 0x80;
1349 int ret;
1350
1351 if (broadcast)
1352 ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1353 else
1354 ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1355
1356 if (ret)
1357 cmd->err = I3C_ERROR_M2;
1358
1359 return ret;
1360 }
1361
/*
 * svc_i3c_master_priv_xfers() - execute a batch of private I3C transfers.
 *
 * Builds one command per i3c_priv_xfer, chaining all but the last with a
 * repeated start, queues the batch and waits (up to 1s) for completion.
 * For reads, actual_len is pre-loaded with the requested length so it is
 * used as the hardware read-termination count.
 *
 * Returns 0 on success or a negative error code (including -ETIMEDOUT).
 */
static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
				     struct i3c_priv_xfer *xfers,
				     int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct svc_i3c_xfer *xfer;
	int ret, i;

	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	for (i = 0; i < nxfers; i++) {
		struct svc_i3c_cmd *cmd = &xfer->cmds[i];

		cmd->xfer = &xfers[i];
		cmd->addr = master->addrs[data->index];
		cmd->rnw = xfers[i].rnw;
		cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
		cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
		cmd->len = xfers[i].len;
		cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
		/* Keep the bus with a repeated start between messages */
		cmd->continued = (i + 1) < nxfers;
	}

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}
1402
/*
 * svc_i3c_master_i2c_xfers() - execute a batch of legacy I2C transfers.
 *
 * Mirrors svc_i3c_master_priv_xfers() but marks the transfer as I2C so the
 * controller uses I2C signalling. All but the last message are chained with
 * a repeated start. Waits up to 1s for the whole batch to complete.
 *
 * Returns 0 on success or a negative error code (including -ETIMEDOUT).
 */
static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
				    const struct i2c_msg *xfers,
				    int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
	struct svc_i3c_xfer *xfer;
	int ret, i;

	xfer = svc_i3c_master_alloc_xfer(master, nxfers);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I2C;

	for (i = 0; i < nxfers; i++) {
		struct svc_i3c_cmd *cmd = &xfer->cmds[i];

		cmd->addr = master->addrs[data->index];
		cmd->rnw = xfers[i].flags & I2C_M_RD;
		cmd->in = cmd->rnw ? xfers[i].buf : NULL;
		cmd->out = cmd->rnw ? NULL : xfers[i].buf;
		cmd->len = xfers[i].len;
		cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
		/* Keep the bus with a repeated start between messages */
		cmd->continued = (i + 1 < nxfers);
	}

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}
1442
svc_i3c_master_request_ibi(struct i3c_dev_desc * dev,const struct i3c_ibi_setup * req)1443 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1444 const struct i3c_ibi_setup *req)
1445 {
1446 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1447 struct svc_i3c_master *master = to_svc_i3c_master(m);
1448 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1449 unsigned long flags;
1450 unsigned int i;
1451
1452 if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
1453 dev_err(master->dev, "IBI max payload %d should be < %d\n",
1454 dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1455 return -ERANGE;
1456 }
1457
1458 data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1459 if (IS_ERR(data->ibi_pool))
1460 return PTR_ERR(data->ibi_pool);
1461
1462 spin_lock_irqsave(&master->ibi.lock, flags);
1463 for (i = 0; i < master->ibi.num_slots; i++) {
1464 if (!master->ibi.slots[i]) {
1465 data->ibi = i;
1466 master->ibi.slots[i] = dev;
1467 break;
1468 }
1469 }
1470 spin_unlock_irqrestore(&master->ibi.lock, flags);
1471
1472 if (i < master->ibi.num_slots)
1473 return 0;
1474
1475 i3c_generic_ibi_free_pool(data->ibi_pool);
1476 data->ibi_pool = NULL;
1477
1478 return -ENOSPC;
1479 }
1480
/*
 * svc_i3c_master_free_ibi() - release the device's IBI slot and pool.
 *
 * Clears the hardware slot under the IBI lock (so the IRQ path cannot see
 * a half-released slot), then frees the generic slot pool.
 */
static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	spin_lock_irqsave(&master->ibi.lock, flags);
	master->ibi.slots[data->ibi] = NULL;
	data->ibi = -1;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
}
1495
svc_i3c_master_enable_ibi(struct i3c_dev_desc * dev)1496 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1497 {
1498 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1499 struct svc_i3c_master *master = to_svc_i3c_master(m);
1500 int ret;
1501
1502 ret = pm_runtime_resume_and_get(master->dev);
1503 if (ret < 0) {
1504 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1505 return ret;
1506 }
1507
1508 master->enabled_events |= SVC_I3C_EVENT_IBI;
1509 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1510
1511 return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1512 }
1513
/*
 * svc_i3c_master_disable_ibi() - disable IBIs for a device.
 *
 * Clears the IBI event flag (disabling interrupts entirely when no event
 * remains enabled), sends the DISEC CCC, then drops the runtime PM
 * reference taken by svc_i3c_master_enable_ibi().
 */
static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	master->enabled_events &= ~SVC_I3C_EVENT_IBI;
	if (!master->enabled_events)
		svc_i3c_master_disable_interrupts(master);

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
1531
/*
 * svc_i3c_master_enable_hotjoin() - enable Hot-Join event detection.
 *
 * Holds a runtime PM reference while Hot-Join is enabled (released by
 * svc_i3c_master_disable_hotjoin()) and arms the SLVSTART interrupt so
 * incoming Hot-Join requests wake the IRQ handler.
 */
static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;

	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);

	return 0;
}
1549
/*
 * svc_i3c_master_disable_hotjoin() - disable Hot-Join event detection.
 *
 * Clears the Hot-Join event flag (disabling interrupts entirely when no
 * event remains enabled) and drops the runtime PM reference taken by
 * svc_i3c_master_enable_hotjoin().
 */
static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;

	if (!master->enabled_events)
		svc_i3c_master_disable_interrupts(master);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return 0;
}
1564
/* Return a consumed IBI slot to the device's generic slot pool */
static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					    struct i3c_ibi_slot *slot)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}
1572
/* Controller operations exposed to the I3C core */
static const struct i3c_master_controller_ops svc_i3c_master_ops = {
	.bus_init = svc_i3c_master_bus_init,
	.bus_cleanup = svc_i3c_master_bus_cleanup,
	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
	.do_daa = svc_i3c_master_do_daa,
	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
	.priv_xfers = svc_i3c_master_priv_xfers,
	.i2c_xfers = svc_i3c_master_i2c_xfers,
	.request_ibi = svc_i3c_master_request_ibi,
	.free_ibi = svc_i3c_master_free_ibi,
	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
	.enable_ibi = svc_i3c_master_enable_ibi,
	.disable_ibi = svc_i3c_master_disable_ibi,
	.enable_hotjoin = svc_i3c_master_enable_hotjoin,
	.disable_hotjoin = svc_i3c_master_disable_hotjoin,
};
1594
svc_i3c_master_prepare_clks(struct svc_i3c_master * master)1595 static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
1596 {
1597 int ret = 0;
1598
1599 ret = clk_prepare_enable(master->pclk);
1600 if (ret)
1601 return ret;
1602
1603 ret = clk_prepare_enable(master->fclk);
1604 if (ret) {
1605 clk_disable_unprepare(master->pclk);
1606 return ret;
1607 }
1608
1609 ret = clk_prepare_enable(master->sclk);
1610 if (ret) {
1611 clk_disable_unprepare(master->pclk);
1612 clk_disable_unprepare(master->fclk);
1613 return ret;
1614 }
1615
1616 return 0;
1617 }
1618
/*
 * Disable and unprepare all controller clocks.
 *
 * NOTE(review): clocks are released in acquisition order rather than the
 * conventional reverse order; the clocks appear independent so this is
 * likely harmless — confirm against the SoC clock tree.
 */
static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
{
	clk_disable_unprepare(master->pclk);
	clk_disable_unprepare(master->fclk);
	clk_disable_unprepare(master->sclk);
}
1625
/*
 * svc_i3c_master_probe() - bind the driver to a Silvaco I3C controller.
 *
 * Maps registers, acquires the three clocks and the IRQ, initializes the
 * transfer queue and IBI bookkeeping, brings up runtime PM (active, with
 * autosuspend), resets the controller and registers it with the I3C core.
 * On failure, runtime PM and clocks are unwound in reverse order.
 */
static int svc_i3c_master_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct svc_i3c_master *master;
	int ret;

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(master->pclk))
		return PTR_ERR(master->pclk);

	master->fclk = devm_clk_get(dev, "fast_clk");
	if (IS_ERR(master->fclk))
		return PTR_ERR(master->fclk);

	master->sclk = devm_clk_get(dev, "slow_clk");
	if (IS_ERR(master->sclk))
		return PTR_ERR(master->sclk);

	master->irq = platform_get_irq(pdev, 0);
	if (master->irq < 0)
		return master->irq;

	master->dev = dev;

	/* Clocks must run before the IRQ handler can touch registers */
	ret = svc_i3c_master_prepare_clks(master);
	if (ret)
		return ret;

	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
	mutex_init(&master->lock);

	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
	if (ret)
		goto err_disable_clks;

	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	spin_lock_init(&master->ibi.lock);
	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
					 sizeof(*master->ibi.slots),
					 GFP_KERNEL);
	if (!master->ibi.slots) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	platform_set_drvdata(pdev, master);

	/* Start runtime PM active so registration runs with power on */
	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	svc_i3c_master_reset(master);

	/* Register the master */
	ret = i3c_master_register(&master->base, &pdev->dev,
				  &svc_i3c_master_ops, false);
	if (ret)
		goto rpm_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

rpm_disable:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

err_disable_clks:
	svc_i3c_master_unprepare_clks(master);

	return ret;
}
1718
/*
 * svc_i3c_master_remove() - unbind the driver.
 *
 * Unregisters the master from the I3C core first (this may trigger bus
 * cleanup callbacks that still need the device powered), then tears down
 * runtime PM. Clock teardown happens via the runtime PM suspend path.
 */
static void svc_i3c_master_remove(struct platform_device *pdev)
{
	struct svc_i3c_master *master = platform_get_drvdata(pdev);

	i3c_master_unregister(&master->base);

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
1728
/* Save the registers that must survive a power-gated suspend */
static void svc_i3c_save_regs(struct svc_i3c_master *master)
{
	master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
	master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
}
1734
/*
 * Restore saved registers after resume.
 *
 * If MDYNADDR still matches the saved value the controller kept its state
 * across suspend and nothing needs rewriting; a mismatch indicates the
 * block lost power, so both saved registers are reprogrammed.
 */
static void svc_i3c_restore_regs(struct svc_i3c_master *master)
{
	if (readl(master->regs + SVC_I3C_MDYNADDR) !=
	    master->saved_regs.mdynaddr) {
		writel(master->saved_regs.mconfig,
		       master->regs + SVC_I3C_MCONFIG);
		writel(master->saved_regs.mdynaddr,
		       master->regs + SVC_I3C_MDYNADDR);
	}
}
1745
/*
 * Runtime suspend: save volatile registers, stop the clocks and move the
 * pins to their sleep state. Register save must happen while clocks run.
 */
static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
{
	struct svc_i3c_master *master = dev_get_drvdata(dev);

	svc_i3c_save_regs(master);
	svc_i3c_master_unprepare_clks(master);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}
1756
svc_i3c_runtime_resume(struct device * dev)1757 static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
1758 {
1759 struct svc_i3c_master *master = dev_get_drvdata(dev);
1760
1761 pinctrl_pm_select_default_state(dev);
1762 svc_i3c_master_prepare_clks(master);
1763
1764 svc_i3c_restore_regs(master);
1765
1766 return 0;
1767 }
1768
/* System sleep is routed through runtime PM (force suspend/resume) */
static const struct dev_pm_ops svc_i3c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
			   svc_i3c_runtime_resume, NULL)
};
1775
/* Devicetree match table */
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
	{ .compatible = "silvaco,i3c-master-v1"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
1781
/* Platform driver glue and module metadata */
static struct platform_driver svc_i3c_master = {
	.probe = svc_i3c_master_probe,
	.remove_new = svc_i3c_master_remove,
	.driver = {
		.name = "silvaco-i3c-master",
		.of_match_table = svc_i3c_master_of_match_tbl,
		.pm = &svc_i3c_pm_ops,
	},
};
module_platform_driver(svc_i3c_master);

MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
MODULE_LICENSE("GPL v2");