// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From the DISABLE state we can only transition
 * to the POR state. Also, while in an L2 state, the host cannot jump back to
 * previous L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
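 *
 * For example, from M0 the host may move to M3_ENTER, but once in
 * SHUTDOWN_PROCESS (an L2 state) only DISABLE or LD_ERR_FATAL_DETECT
 * remain reachable, per dev_state_transitions[] below.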
 */
static struct mhi_pm_transitions const dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
	},
};

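/*
 * Each MHI_PM_* state is a distinct bit in a 32-bit mask, assigned in the
 * same order as dev_state_transitions[], so find_last_bit() on the current
 * state yields the index of its row. Callers take pm_lock in write mode,
 * which keeps the read-modify-write of pm_state below atomic.
 */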
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	if (state == MHI_STATE_RESET) {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
	} else {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_MHISTATE_MASK,
				    MHICTRL_MHISTATE_SHIFT, state);
	}
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

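/*
 * Momentarily grab and release a device wake vote; passing the override
 * flag to wake_put() leaves the wake doorbell asserted, nudging the device
 * out of M2
 */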
static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	void __iomem *base = mhi_cntrl->regs;
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 reset = 1, ready = 0;
	int ret, i;
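	/*
	 * The reset/ready defaults read as "in reset, not ready", so a
	 * timeout is reported even if no register read below ever succeeds
	 */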

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
					      MHICTRL_RESET_MASK,
					      MHICTRL_RESET_SHIFT, &reset) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
					      MHISTATUS_READY_MASK,
					      MHISTATUS_READY_SHIFT, &ready) ||
			   (!reset && ready),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Timeout if device did not transition to ready state */
	if (reset || !ready) {
		dev_err(dev, "Device Ready timeout\n");
		return -ETIMEDOUT;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

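		/*
		 * Point the write pointer at the last ring element so the
		 * entire ring is available for the device to post events into
		 */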
		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		write_lock_irq(&mhi_chan->lock);
		if (mhi_chan->db_cfg.reset_req)
			mhi_chan->db_cfg.db_mode = true;

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		write_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * to keep it in a low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
		return -EIO;

	wake_up_all(&mhi_cntrl->state_event);

	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle SYS_ERR and Shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
				      enum mhi_pm_state transition_state)
{
	enum mhi_pm_state cur_state, prev_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(transition_state));

	/* We must notify the MHI controller driver so it can clean up first */
	if (transition_state == MHI_PM_SYS_ERR_PROCESS)
		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state == transition_state) {
		mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
		mhi_cntrl->dev_state = MHI_STATE_RESET;
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	if (cur_state != transition_state) {
		dev_err(dev, "Failed to transition to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(cur_state));
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return;
	}

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			mutex_unlock(&mhi_cntrl->pm_mutex);
			return;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);
	flush_work(&mhi_cntrl->st_worker);
	flush_work(&mhi_cntrl->fw_worker);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
		mhi_ready_state_transition(mhi_cntrl);
	} else {
		/* Move to disable state */
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (unlikely(cur_state != MHI_PM_DISABLE))
			dev_err(dev, "Error moving from PM state: %s to: %s\n",
				to_mhi_pm_state_str(cur_state),
				to_mhi_pm_state_str(MHI_PM_DISABLE));
	}

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
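/* GFP_ATOMIC below: this may be called from atomic context */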
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	schedule_work(&mhi_cntrl->st_worker);

	return 0;
}

/* SYS_ERR worker */
void mhi_pm_sys_err_worker(struct work_struct *work)
{
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							syserr_worker);

	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

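	/* Detach the pending list under the lock, then process it unlocked */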
	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			if (MHI_IN_PBL(mhi_cntrl->ee))
				wake_up_all(&mhi_cntrl->state_event);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
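		/*
		 * A paired runtime_get()/runtime_put() lets the controller
		 * driver bring the link out of suspend without this function
		 * leaving a runtime PM usage count held
		 */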
		pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
		mhi_cntrl->runtime_get(mhi_cntrl);
		mhi_cntrl->runtime_put(mhi_cntrl);
	}
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 val;
	int ret;

	dev_info(dev, "Requested to power ON\n");

	if (mhi_cntrl->nr_irqs < mhi_cntrl->total_ev_rings)
		return -EINVAL;

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
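		/* No wake toggling is needed if DBs can be rung in M2 */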
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	if (!mhi_cntrl->pre_init) {
		/* Setup device context */
		ret = mhi_init_dev_ctxt(mhi_cntrl);
		if (ret)
			goto error_dev_ctxt;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto error_setup_irq;

	/* Setup BHI offset & INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
	if (ret) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto error_bhi_offset;
	}

	mhi_cntrl->bhi = mhi_cntrl->regs + val;

	/* Setup BHIE offset */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
		if (ret) {
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_err(dev, "Error reading BHIE offset\n");
			goto error_bhi_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + val;
	}

	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
		dev_err(dev, "Not a valid EE for power on\n");
		ret = -EIO;
		goto error_bhi_offset;
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	if (next_state == DEV_ST_TRANSITION_PBL)
		schedule_work(&mhi_cntrl->fw_worker);

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_bhi_offset:
	mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
	if (!mhi_cntrl->pre_init)
		mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	if (!graceful) {
		mutex_lock(&mhi_cntrl->pm_mutex);
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl,
						MHI_PM_LD_ERR_FATAL_DETECT);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
			dev_dbg(dev, "Failed to move to state: %s from: %s\n",
				to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
				to_mhi_pm_state_str(mhi_cntrl->pm_state));
	}
	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
	mhi_deinit_free_irq(mhi_cntrl);

	if (!mhi_cntrl->pre_init) {
		/* Free all allocated resources */
		if (mhi_cntrl->fbc_image) {
			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
			mhi_cntrl->fbc_image = NULL;
		}
		mhi_deinit_dev_ctxt(mhi_cntrl);
	}
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO;
}
EXPORT_SYMBOL(mhi_sync_power_up);