// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, user cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static struct mhi_pm_transitions const dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
	},
};

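/*
 * Look up the row for the current state and check that the requested state
 * is present in its to_states mask. This relies on each MHI_PM_* state being
 * a distinct bit assigned in increasing priority order (see internal.h), so
 * find_last_bit() on the pm_state word yields the table index. Callers must
 * hold pm_lock in write mode since pm_state is updated in place.
 */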
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}
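
/*
 * Typical calling pattern, sketched for illustration (the transition
 * handlers below all follow this shape):
 *
 *	write_lock_irq(&mhi_cntrl->pm_lock);
 *	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
 *	write_unlock_irq(&mhi_cntrl->pm_lock);
 *	if (cur_state != MHI_PM_M0)
 *		return -EIO;
 *
 * Since the function is __must_check, callers compare the returned state
 * against the requested one to detect a rejected transition.
 */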

void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	if (state == MHI_STATE_RESET) {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
	} else {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_MHISTATE_MASK,
				    MHICTRL_MHISTATE_SHIFT, state);
	}
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

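/*
 * Pulse the device wake doorbell: assert and immediately release it so a
 * device sitting in M2 is nudged back to M0 without leaving an extra wake
 * vote behind.
 */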
static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	int ret, i;

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
				 interval_us);
	if (ret) {
		dev_err(dev, "Device failed to clear MHI Reset\n");
		return ret;
	}

	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				 MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1,
				 interval_us);
	if (ret) {
		dev_err(dev, "Device failed to enter MHI Ready\n");
		return ret;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

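		/*
		 * Point the write pointer at the last element so the whole
		 * ring is offered to the device for posting events; mirror
		 * it into the shared event ring context using the
		 * device-visible (IOMMU) address.
		 */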
		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}
	mhi_cntrl->M0++;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * to keep it in a low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);

		mhi_cntrl->M2++;
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	mhi_cntrl->M3++;
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	mhi_cntrl->ee = ee;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
			      mhi_destroy_device);
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset)
			dev_err(dev, "Device failed to exit MHI Reset state\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		dev_err(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	enum dev_st_transition next_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify MHI control driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			goto exit_sys_error_transition;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Transition to next state */
	if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (cur_state != MHI_PM_POR) {
			dev_err(dev, "Error moving to state %s from %s\n",
				to_mhi_pm_state_str(MHI_PM_POR),
				to_mhi_pm_state_str(cur_state));
			goto exit_sys_error_transition;
		}
		next_state = DEV_ST_TRANSITION_PBL;
	} else {
		next_state = DEV_ST_TRANSITION_READY;
	}

	mhi_queue_state_transition(mhi_cntrl, next_state);

exit_sys_error_transition:
	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}
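
/*
 * Queued transitions are consumed in order by mhi_pm_st_worker() below.
 * For example, mhi_power_down() queues DEV_ST_TRANSITION_DISABLE and then
 * flushes st_worker so the shutdown completes before it returns. GFP_ATOMIC
 * is used because this can be called from atomic context, e.g. from
 * interrupt handlers reporting a device state change.
 */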

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			if (mhi_cntrl->fbc_download)
				mhi_download_amss_image(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_FP:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_FP;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Could not enter M0/M1 state");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_dbg(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_dbg(dev, "Waiting for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);
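
/*
 * Expected pairing, sketched with hypothetical controller driver names
 * (not part of this core): a controller driver calls these from its
 * system PM callbacks so MHI enters M3 before the link suspends and
 * returns to M0 once it resumes.
 *
 *	static int foo_pci_suspend(struct device *dev)
 *	{
 *		return mhi_pm_suspend(foo->mhi_cntrl);
 *	}
 *
 *	static int foo_pci_resume(struct device *dev)
 *	{
 *		return mhi_pm_resume(foo->mhi_cntrl);
 *	}
 */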

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
		return -EINVAL;

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_info(dev,
			 "Error setting to PM state: %s from: %s\n",
			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 val;
	int ret;

	dev_info(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto error_setup_irq;

	/* Setup BHI offset & INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
	if (ret) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto error_bhi_offset;
	}

	mhi_cntrl->bhi = mhi_cntrl->regs + val;

	/* Setup BHIE offset */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
		if (ret) {
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_err(dev, "Error reading BHIE offset\n");
			goto error_bhi_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + val;
	}

	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
		dev_err(dev, "Not a valid EE for power on\n");
		ret = -EIO;
		goto error_bhi_offset;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = wait_event_timeout(mhi_cntrl->state_event,
				MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
					mhi_read_reg_field(mhi_cntrl,
							   mhi_cntrl->regs,
							   MHICTRL,
							   MHICTRL_RESET_MASK,
							   MHICTRL_RESET_SHIFT,
							   &val) ||
					!val,
				msecs_to_jiffies(mhi_cntrl->timeout_ms));
		if (!ret) {
			ret = -EIO;
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_bhi_offset;
		}

		/*
		 * Device clears BHI_INTVEC as part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_bhi_offset:
	mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
	mhi_cntrl->pm_state = MHI_PM_DISABLE;
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);
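
/*
 * Illustrative bring-up sequence for a controller driver (hypothetical
 * names; error handling elided). mhi_async_power_up() only queues the
 * first state transition; mhi_sync_power_up() below additionally waits
 * for mission mode or an error state:
 *
 *	ret = mhi_register_controller(mhi_cntrl, &foo_config);
 *	if (!ret)
 *		ret = mhi_sync_power_up(mhi_cntrl);
 */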

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_cntrl->pm_state;
	if (cur_state == MHI_PM_DISABLE) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return; /* Already powered down */
	}

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;

	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		dev_err(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
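
/*
 * Note: mhi_device_get()/mhi_device_get_sync() and mhi_device_put() must be
 * balanced by client drivers; dev_wake is a vote count, and the wake
 * doorbell is only released once every vote has been dropped (see
 * mhi_deassert_dev_wake() above).
 */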