// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, user cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static struct mhi_pm_transitions const dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
	},
};

enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

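	/*
	 * PM states are one-hot bit values, so the position of the set bit in
	 * the current state doubles as its index into dev_state_transitions
	 * above.
	 */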
	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
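	/*
	 * RESET is a standalone bit in MHICTRL; all other states are
	 * programmed into the MHISTATE field of the same register.
	 */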
	if (state == MHI_STATE_RESET) {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
	} else {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_MHISTATE_MASK,
				    MHICTRL_MHISTATE_SHIFT, state);
	}
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	void __iomem *base = mhi_cntrl->regs;
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 reset = 1, ready = 0;
	int ret, i;

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
					      MHICTRL_RESET_MASK,
					      MHICTRL_RESET_SHIFT, &reset) ||
			   mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
					      MHISTATUS_READY_MASK,
					      MHISTATUS_READY_SHIFT, &ready) ||
			   (!reset && ready),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Timeout if device did not transition to ready state */
	if (reset || !ready) {
		dev_err(dev, "Device Ready timeout\n");
		return -ETIMEDOUT;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

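		/*
		 * Point the write pointer at the last element so that the
		 * entire ring is available to the device for posting events.
		 */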
		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}
	mhi_cntrl->M0++;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

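		/*
		 * Channels that set reset_req need their doorbell mode
		 * restored here, since doorbell state may not be preserved
		 * across the low power states just exited.
		 */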
		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * for keeping it in low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);

		mhi_cntrl->M2++;
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
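			/* Toggle device wake to bounce the device back to M0 */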
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	mhi_cntrl->M3++;
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset)
			dev_err(dev, "Device failed to exit MHI Reset state\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		dev_err(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify MHI control driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			goto exit_sys_error_transition;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	mhi_ready_state_transition(mhi_cntrl);

exit_sys_error_transition:
	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
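	/* Callers may be in atomic context, so the allocation must not sleep */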
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			if (MHI_IN_PBL(mhi_cntrl->ee))
				mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Could not enter M0/M1 state\n");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_info(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_info(dev, "Wait for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_info(dev,
			 "Error setting to PM state: %s from: %s\n",
			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
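	/* If the device is suspended (M3), trigger a resume before waiting */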
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 val;
	int ret;

	dev_info(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	if (!mhi_cntrl->pre_init) {
		/* Setup device context */
		ret = mhi_init_dev_ctxt(mhi_cntrl);
		if (ret)
			goto error_dev_ctxt;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto error_setup_irq;

	/* Setup BHI offset & INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
	if (ret) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto error_bhi_offset;
	}

	mhi_cntrl->bhi = mhi_cntrl->regs + val;

	/* Setup BHIE offset */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
		if (ret) {
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_err(dev, "Error reading BHIE offset\n");
			goto error_bhi_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + val;
	}

	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
		dev_err(dev, "Not a valid EE for power on\n");
		ret = -EIO;
		goto error_bhi_offset;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = wait_event_timeout(mhi_cntrl->state_event,
				MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
					mhi_read_reg_field(mhi_cntrl,
							   mhi_cntrl->regs,
							   MHICTRL,
							   MHICTRL_RESET_MASK,
							   MHICTRL_RESET_SHIFT,
							   &val) ||
					!val,
				msecs_to_jiffies(mhi_cntrl->timeout_ms));
		if (!ret) {
			ret = -EIO;
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_bhi_offset;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_bhi_offset:
	mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
	if (!mhi_cntrl->pre_init)
		mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		dev_err(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	mhi_deinit_free_irq(mhi_cntrl);

	if (!mhi_cntrl->pre_init) {
		/* Free all allocated resources */
		if (mhi_cntrl->fbc_image) {
			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
			mhi_cntrl->fbc_image = NULL;
		}
		mhi_deinit_dev_ctxt(mhi_cntrl);
	}
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);