// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen asynchronously at any time. The table
 * below allows a transition to a new state only if it is a legal move from
 * the current state.
 *
 * Priority increases as we go down. For instance, from any state in L0, a
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is the DISABLE state, from which we can only transition to the
 * POR state. Also, while in the L2 state, we cannot jump back to the
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 -> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT -> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS -> POR
 * L2: SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
 */
static struct mhi_pm_transitions const dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
	},
};

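/*
 * Attempt a PM state transition against the table above. Each pm_state is
 * encoded as a single set bit, so find_last_bit() on the current state
 * yields the index of its entry in dev_state_transitions. The transition
 * succeeds only if the requested state is present in that entry's
 * to_states mask; otherwise the current state is returned unchanged, and
 * callers detect rejection by comparing the return value against the
 * state they requested.
 */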
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

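/*
 * Set the device MHI state via the MHICTRL register. RESET has its own
 * register field; all other states are written to the MHISTATE field.
 */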
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	if (state == MHI_STATE_RESET) {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
	} else {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_MHISTATE_MASK,
				    MHICTRL_MHISTATE_SHIFT, state);
	}
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	void __iomem *base = mhi_cntrl->regs;
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 reset = 1, ready = 0;
	int ret, i;

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
					      MHICTRL_RESET_MASK,
					      MHICTRL_RESET_SHIFT, &reset) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
					      MHISTATUS_READY_MASK,
					      MHISTATUS_READY_SHIFT, &ready) ||
			   (!reset && ready),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Timeout if device did not transition to ready state */
	if (reset || !ready) {
		dev_err(dev, "Device Ready timeout\n");
		return -ETIMEDOUT;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

		/* Point wp at the last element so the entire ring is available to the device */
		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

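/* Handle device M0 state transition: resume doorbell traffic and wake waiters */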
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating a
 * transition to the M1 state, the host can move the device to the M2 state
 * to keep it in a low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
		return -EIO;

	wake_up_all(&mhi_cntrl->state_event);

	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle SYS_ERR and Shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
				      enum mhi_pm_state transition_state)
{
	enum mhi_pm_state cur_state, prev_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(transition_state));

	/* We must notify the MHI controller driver so it can clean up first */
	if (transition_state == MHI_PM_SYS_ERR_PROCESS)
		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state == transition_state) {
		mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
		mhi_cntrl->dev_state = MHI_STATE_RESET;
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	if (cur_state != transition_state) {
		dev_err(dev, "Failed to transition to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(cur_state));
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return;
	}

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			mutex_unlock(&mhi_cntrl->pm_mutex);
			return;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
		mhi_ready_state_transition(mhi_cntrl);
	} else {
		/* Move to disable state */
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (unlikely(cur_state != MHI_PM_DISABLE))
			dev_err(dev, "Error moving from PM state: %s to: %s\n",
				to_mhi_pm_state_str(cur_state),
				to_mhi_pm_state_str(MHI_PM_DISABLE));
	}

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	schedule_work(&mhi_cntrl->st_worker);

	return 0;
}

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* Skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			if (MHI_IN_PBL(mhi_cntrl->ee))
				mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_disable_transition
				(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition
				(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

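/*
 * Move the device from M0/M1 into the M3 suspend state. Fails with -EBUSY
 * if there are outstanding wake or packet references, and notifies LPM
 * channel clients once M3 entry completes.
 */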
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Could not enter M0/M1 state\n");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_info(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_info(dev, "Wait for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

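/*
 * Wake the device from M3 back to M0. Notifies LPM channel clients of the
 * exit before requesting M0 and waiting for the transition to complete.
 */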
int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_info(dev,
			 "Error setting to PM state: %s from: %s\n",
			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

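/*
 * Grab a device wake reference and, if the device is suspended, ask the
 * controller to resume it, then wait until the PM state reaches M0.
 */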
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

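/*
 * Start the power up sequence: set up IRQs and the BHI/BHIE regions, handle
 * a device stuck in SYS_ERR, then queue the PBL or READY state transition.
 * Returns before the device reaches mission mode; use mhi_sync_power_up()
 * to wait for that.
 */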
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 val;
	int ret;

	dev_info(dev, "Requested to power ON\n");

	if (mhi_cntrl->nr_irqs < mhi_cntrl->total_ev_rings)
		return -EINVAL;

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	if (!mhi_cntrl->pre_init) {
		/* Setup device context */
		ret = mhi_init_dev_ctxt(mhi_cntrl);
		if (ret)
			goto error_dev_ctxt;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto error_setup_irq;

	/* Setup BHI offset & INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
	if (ret) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto error_bhi_offset;
	}

	mhi_cntrl->bhi = mhi_cntrl->regs + val;

	/* Setup BHIE offset */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
		if (ret) {
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_err(dev, "Error reading BHIE offset\n");
			goto error_bhi_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + val;
	}

	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in a valid exec env */
	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
		dev_err(dev, "Not a valid EE for power on\n");
		ret = -EIO;
		goto error_bhi_offset;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = wait_event_timeout(mhi_cntrl->state_event,
				MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
					mhi_read_reg_field(mhi_cntrl,
							   mhi_cntrl->regs,
							   MHICTRL,
							   MHICTRL_RESET_MASK,
							   MHICTRL_RESET_SHIFT,
							   &val) ||
					!val,
				msecs_to_jiffies(mhi_cntrl->timeout_ms));
		/* wait_event_timeout() returns 0 on timeout */
		if (!ret) {
			ret = -EIO;
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_bhi_offset;
		}

		/*
		 * Device clears BHI_INTVEC as part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_bhi_offset:
	mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
	if (!mhi_cntrl->pre_init)
		mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

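/*
 * Power down the device. A non-graceful shutdown first forces the PM state
 * to LD_ERR_FATAL_DETECT so that no further device access is attempted;
 * the shutdown itself runs through the state transition worker.
 */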
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	if (!graceful) {
		mutex_lock(&mhi_cntrl->pm_mutex);
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl,
						MHI_PM_LD_ERR_FATAL_DETECT);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
			dev_dbg(dev, "Failed to move to state: %s from: %s\n",
				to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
				to_mhi_pm_state_str(mhi_cntrl->pm_state));
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	mhi_deinit_free_irq(mhi_cntrl);

	if (!mhi_cntrl->pre_init) {
		/* Free all allocated resources */
		if (mhi_cntrl->fbc_image) {
			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
			mhi_cntrl->fbc_image = NULL;
		}
		mhi_deinit_dev_ctxt(mhi_cntrl);
	}
}
EXPORT_SYMBOL_GPL(mhi_power_down);

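/*
 * Synchronous power up: kick off mhi_async_power_up() and wait until the
 * device reaches mission mode, powering back down on timeout or error.
 */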
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

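/*
 * Force the device into the RDDM execution environment by raising SYS_ERR,
 * then wait for the device to report the RDDM EE.
 */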
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

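/* Take a device wake reference on behalf of an MHI client device */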
void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);