1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4 *
5 */
6 #ifndef _MHI_H_
7 #define _MHI_H_
8
9 #include <linux/device.h>
10 #include <linux/dma-direction.h>
11 #include <linux/mutex.h>
12 #include <linux/skbuff.h>
13 #include <linux/slab.h>
14 #include <linux/spinlock.h>
15 #include <linux/wait.h>
16 #include <linux/workqueue.h>
17
/* Number of u32 segments in struct mhi_controller's @oem_pk_hash array */
#define MHI_MAX_OEM_PK_HASH_SEGMENTS 16

/* Opaque types; definitions are private to the MHI core */
struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
struct mhi_cmd;
struct mhi_buf_info;
25
/**
 * enum mhi_callback - MHI callback
 * @MHI_CB_IDLE: MHI entered idle state
 * @MHI_CB_PENDING_DATA: New data available for client to process
 * @MHI_CB_LPM_ENTER: MHI host entered low power mode
 * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
 * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env
 * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
 * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
 * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state
 * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
 *
 * These reasons are delivered through the status_cb callbacks registered
 * in struct mhi_controller and struct mhi_driver.
 */
enum mhi_callback {
	MHI_CB_IDLE,
	MHI_CB_PENDING_DATA,
	MHI_CB_LPM_ENTER,
	MHI_CB_LPM_EXIT,
	MHI_CB_EE_RDDM,
	MHI_CB_EE_MISSION_MODE,
	MHI_CB_SYS_ERROR,
	MHI_CB_FATAL_ERROR,
	MHI_CB_BW_REQ,
};
49
/**
 * enum mhi_flags - Transfer flags passed to the mhi_queue_*() APIs
 * @MHI_EOB: End of buffer for bulk transfer
 * @MHI_EOT: End of transfer
 * @MHI_CHAIN: Linked transfer
 */
enum mhi_flags {
	MHI_EOB = BIT(0),
	MHI_EOT = BIT(1),
	MHI_CHAIN = BIT(2),
};
61
/**
 * enum mhi_device_type - Device types
 * @MHI_DEVICE_XFER: Handles data transfer
 * @MHI_DEVICE_CONTROLLER: Control device
 */
enum mhi_device_type {
	MHI_DEVICE_XFER,
	MHI_DEVICE_CONTROLLER,
};
71
/**
 * enum mhi_ch_type - Channel types
 * @MHI_CH_TYPE_INVALID: Invalid channel type
 * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device
 * @MHI_CH_TYPE_INBOUND: Inbound channel from the device
 * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine
 *				   multiple packets and send them as a single
 *				   large packet to reduce CPU consumption
 *
 * OUTBOUND/INBOUND deliberately alias DMA_TO_DEVICE/DMA_FROM_DEVICE so a
 * channel type can be used directly as an enum dma_data_direction.
 */
enum mhi_ch_type {
	MHI_CH_TYPE_INVALID = 0,
	MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE,
	MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE,
	MHI_CH_TYPE_INBOUND_COALESCED = 3,
};
87
/**
 * struct image_info - Firmware and RDDM table
 * @mhi_buf: Buffer for firmware and RDDM table
 * @entries: # of entries in table
 */
struct image_info {
	struct mhi_buf *mhi_buf;
	/* private: from internal.h */
	struct bhi_vec_entry *bhi_vec;
	/* public: */
	u32 entries;
};
100
/**
 * struct mhi_link_info - BW requirement
 * @target_link_speed: Link speed as defined by TLS bits in LinkControl reg
 * @target_link_width: Link width as defined by NLW bits in LinkStatus reg
 */
struct mhi_link_info {
	unsigned int target_link_speed;
	unsigned int target_link_width;
};
110
/**
 * enum mhi_ee_type - Execution environment types
 * @MHI_EE_PBL: Primary Bootloader
 * @MHI_EE_SBL: Secondary Bootloader
 * @MHI_EE_AMSS: Modem, aka the primary runtime EE
 * @MHI_EE_RDDM: Ram dump download mode
 * @MHI_EE_WFW: WLAN firmware mode
 * @MHI_EE_PTHRU: Passthrough
 * @MHI_EE_EDL: Embedded downloader
 * @MHI_EE_FP: Flash Programmer Environment
 * @MHI_EE_MAX_SUPPORTED: Highest supported EE value (aliases MHI_EE_FP)
 * @MHI_EE_DISABLE_TRANSITION: Host-local EE, not part of the MHI spec
 * @MHI_EE_NOT_SUPPORTED: EE value not recognized by the host
 * @MHI_EE_MAX: Sentinel, number of EE values
 */
enum mhi_ee_type {
	MHI_EE_PBL,
	MHI_EE_SBL,
	MHI_EE_AMSS,
	MHI_EE_RDDM,
	MHI_EE_WFW,
	MHI_EE_PTHRU,
	MHI_EE_EDL,
	MHI_EE_FP,
	MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
	MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
	MHI_EE_NOT_SUPPORTED,
	MHI_EE_MAX,
};
136
/**
 * enum mhi_state - MHI states
 * @MHI_STATE_RESET: Reset state
 * @MHI_STATE_READY: Ready state
 * @MHI_STATE_M0: M0 state
 * @MHI_STATE_M1: M1 state
 * @MHI_STATE_M2: M2 state
 * @MHI_STATE_M3: M3 state
 * @MHI_STATE_M3_FAST: M3 Fast state
 * @MHI_STATE_BHI: BHI state
 * @MHI_STATE_SYS_ERR: System Error state
 * @MHI_STATE_MAX: Sentinel value, not a real state
 */
enum mhi_state {
	MHI_STATE_RESET = 0x0,
	MHI_STATE_READY = 0x1,
	MHI_STATE_M0 = 0x2,
	MHI_STATE_M1 = 0x3,
	MHI_STATE_M2 = 0x4,
	MHI_STATE_M3 = 0x5,
	MHI_STATE_M3_FAST = 0x6,
	MHI_STATE_BHI = 0x7,
	MHI_STATE_SYS_ERR = 0xFF,
	MHI_STATE_MAX,
};
161
/**
 * enum mhi_ch_ee_mask - Execution environment mask for channel
 * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE
 * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE
 * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE
 * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE
 * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE
 * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE
 * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE
 *
 * Each bit position corresponds to the matching enum mhi_ee_type value.
 */
enum mhi_ch_ee_mask {
	MHI_CH_EE_PBL = BIT(MHI_EE_PBL),
	MHI_CH_EE_SBL = BIT(MHI_EE_SBL),
	MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS),
	MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM),
	MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU),
	MHI_CH_EE_WFW = BIT(MHI_EE_WFW),
	MHI_CH_EE_EDL = BIT(MHI_EE_EDL),
};
181
/**
 * enum mhi_er_data_type - Event ring data types
 * @MHI_ER_DATA: Only client data over this ring
 * @MHI_ER_CTRL: MHI control data and client data
 */
enum mhi_er_data_type {
	MHI_ER_DATA,
	MHI_ER_CTRL,
};
191
/**
 * enum mhi_db_brst_mode - Doorbell mode
 * @MHI_DB_BRST_DISABLE: Burst mode disable
 * @MHI_DB_BRST_ENABLE: Burst mode enable
 */
enum mhi_db_brst_mode {
	MHI_DB_BRST_DISABLE = 0x2,
	MHI_DB_BRST_ENABLE = 0x3,
};
201
/**
 * struct mhi_channel_config - Channel configuration structure for controller
 * @name: The name of this channel
 * @num: The number assigned to this channel
 * @num_elements: The number of elements that can be queued to this channel
 * @local_elements: The local ring length of the channel
 * @event_ring: The event ring index that services this channel
 * @dir: Direction that data may flow on this channel
 * @type: Channel type
 * @ee_mask: Execution Environment mask for this channel
 * @pollcfg: Polling configuration for burst mode. 0 is default. milliseconds
 *	     for UL channels, multiple of 8 ring elements for DL channels
 * @doorbell: Doorbell mode
 * @lpm_notify: The channel master requires low power mode notifications
 * @offload_channel: The client manages the channel completely
 * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
 * @auto_queue: Framework will automatically queue buffers for DL traffic
 * @wake_capable: Channel capable of waking up the system
 */
struct mhi_channel_config {
	char *name;
	u32 num;
	u32 num_elements;
	u32 local_elements;
	u32 event_ring;
	enum dma_data_direction dir;
	enum mhi_ch_type type;
	u32 ee_mask;
	u32 pollcfg;
	enum mhi_db_brst_mode doorbell;
	bool lpm_notify;
	bool offload_channel;
	bool doorbell_mode_switch;
	bool auto_queue;
	bool wake_capable;
};
238
/**
 * struct mhi_event_config - Event ring configuration structure for controller
 * @num_elements: The number of elements that can be queued to this ring
 * @irq_moderation_ms: Delay irq for additional events to be aggregated
 * @irq: IRQ associated with this ring
 * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring
 * @priority: Priority of this ring. Use 1 for now
 * @mode: Doorbell mode
 * @data_type: Type of data this ring will process
 * @hardware_event: This ring is associated with hardware channels
 * @client_managed: This ring is client managed
 * @offload_channel: This ring is associated with an offloaded channel
 */
struct mhi_event_config {
	u32 num_elements;
	u32 irq_moderation_ms;
	u32 irq;
	u32 channel;
	u32 priority;
	enum mhi_db_brst_mode mode;
	enum mhi_er_data_type data_type;
	bool hardware_event;
	bool client_managed;
	bool offload_channel;
};
264
/**
 * struct mhi_controller_config - Root MHI controller configuration
 * @max_channels: Maximum number of channels supported
 * @timeout_ms: Timeout value for operations. 0 means use default
 * @buf_len: Size of automatically allocated buffers. 0 means use default
 * @num_channels: Number of channels defined in @ch_cfg
 * @ch_cfg: Array of defined channels
 * @num_events: Number of event rings defined in @event_cfg
 * @event_cfg: Array of defined event rings
 * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access
 * @m2_no_db: Host is not allowed to ring DB in M2 state
 */
struct mhi_controller_config {
	u32 max_channels;
	u32 timeout_ms;
	u32 buf_len;
	u32 num_channels;
	const struct mhi_channel_config *ch_cfg;
	u32 num_events;
	struct mhi_event_config *event_cfg;
	bool use_bounce_buf;
	bool m2_no_db;
};
288
/**
 * struct mhi_controller - Master MHI controller structure
 * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
 *            controller (required)
 * @mhi_dev: MHI device instance for the controller
 * @debugfs_dentry: MHI controller debugfs directory
 * @regs: Base address of MHI MMIO register space (required)
 * @bhi: Points to base of MHI BHI register space
 * @bhie: Points to base of MHI BHIe register space
 * @wake_db: MHI WAKE doorbell register address
 * @iova_start: IOMMU starting address for data (required)
 * @iova_stop: IOMMU stop address for data (required)
 * @fw_image: Firmware image name for normal booting (optional)
 * @edl_image: Firmware image name for emergency download mode (optional)
 * @rddm_size: RAM dump size that host should allocate for debugging purpose
 * @sbl_size: SBL image size downloaded through BHIe (optional)
 * @seg_len: BHIe vector size (optional)
 * @reg_len: Length of the MHI MMIO region (required)
 * @fbc_image: Points to firmware image buffer
 * @rddm_image: Points to RAM dump buffer
 * @mhi_chan: Points to the channel configuration table
 * @lpm_chans: List of channels that require LPM notifications
 * @irq: base irq # to request (required)
 * @max_chan: Maximum number of channels the controller supports
 * @total_ev_rings: Total # of event rings allocated
 * @hw_ev_rings: Number of hardware event rings
 * @sw_ev_rings: Number of software event rings
 * @nr_irqs: Number of IRQ allocated by bus master (required)
 * @family_number: MHI controller family number
 * @device_number: MHI controller device number
 * @major_version: MHI controller major revision number
 * @minor_version: MHI controller minor revision number
 * @serial_number: MHI controller serial number obtained from BHI
 * @oem_pk_hash: MHI controller OEM PK Hash obtained from BHI
 * @mhi_event: MHI event ring configurations table
 * @mhi_cmd: MHI command ring configurations table
 * @mhi_ctxt: MHI device context, shared memory between host and device
 * @pm_mutex: Mutex for suspend/resume operation
 * @pm_lock: Lock for protecting MHI power management state
 * @timeout_ms: Timeout in ms for state transitions
 * @pm_state: MHI power management state
 * @db_access: DB access states
 * @ee: MHI device execution environment
 * @dev_state: MHI device state
 * @dev_wake: Device wakeup count
 * @pending_pkts: Pending packets for the controller
 * @M0: Counter to track number of device M0 state transitions
 * @M2: Counter to track number of device M2 state transitions
 * @M3: Counter to track number of device M3 state transitions
 * @transition_list: List of MHI state transitions
 * @transition_lock: Lock for protecting MHI state transition list
 * @wlock: Lock for protecting device wakeup
 * @mhi_link_info: Device bandwidth info
 * @st_worker: State transition worker
 * @hiprio_wq: High priority workqueue for MHI work such as state transitions
 * @state_event: State change event
 * @status_cb: CB function to notify power states of the device (required)
 * @wake_get: CB function to assert device wake (optional)
 * @wake_put: CB function to de-assert device wake (optional)
 * @wake_toggle: CB function to assert and de-assert device wake (optional)
 * @runtime_get: CB function to controller runtime resume (required)
 * @runtime_put: CB function to decrement pm usage (required)
 * @map_single: CB function to create TRE buffer
 * @unmap_single: CB function to destroy TRE buffer
 * @read_reg: Read a MHI register via the physical link (required)
 * @write_reg: Write a MHI register via the physical link (required)
 * @reset: Controller specific reset function (optional)
 * @buffer_len: Bounce buffer length
 * @index: Index of the MHI controller instance
 * @bounce_buf: Use of bounce buffer
 * @fbc_download: MHI host needs to do complete image transfer (optional)
 * @wake_set: Device wakeup set flag
 * @irq_flags: irq flags passed to request_irq (optional)
 * @mru: the default MRU for the MHI device
 *
 * Fields marked as (required) need to be populated by the controller driver
 * before calling mhi_register_controller(). For the fields marked as (optional)
 * they can be populated depending on the usecase.
 *
 * The following fields are present for the purpose of implementing any device
 * specific quirks or customizations for specific MHI revisions used in device
 * by the controller drivers. The MHI stack will just populate these fields
 * during mhi_register_controller():
 *  family_number
 *  device_number
 *  major_version
 *  minor_version
 */
struct mhi_controller {
	struct device *cntrl_dev;
	struct mhi_device *mhi_dev;
	struct dentry *debugfs_dentry;
	void __iomem *regs;
	void __iomem *bhi;
	void __iomem *bhie;
	void __iomem *wake_db;

	dma_addr_t iova_start;
	dma_addr_t iova_stop;
	const char *fw_image;
	const char *edl_image;
	size_t rddm_size;
	size_t sbl_size;
	size_t seg_len;
	size_t reg_len;
	struct image_info *fbc_image;
	struct image_info *rddm_image;
	struct mhi_chan *mhi_chan;
	struct list_head lpm_chans;
	int *irq;
	u32 max_chan;
	u32 total_ev_rings;
	u32 hw_ev_rings;
	u32 sw_ev_rings;
	u32 nr_irqs;
	u32 family_number;
	u32 device_number;
	u32 major_version;
	u32 minor_version;
	u32 serial_number;
	u32 oem_pk_hash[MHI_MAX_OEM_PK_HASH_SEGMENTS];

	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	struct mhi_ctxt *mhi_ctxt;

	struct mutex pm_mutex;
	rwlock_t pm_lock;
	u32 timeout_ms;
	u32 pm_state;
	u32 db_access;
	enum mhi_ee_type ee;
	enum mhi_state dev_state;
	atomic_t dev_wake;
	atomic_t pending_pkts;
	u32 M0, M2, M3;
	struct list_head transition_list;
	spinlock_t transition_lock;
	spinlock_t wlock;
	struct mhi_link_info mhi_link_info;
	struct work_struct st_worker;
	struct workqueue_struct *hiprio_wq;
	wait_queue_head_t state_event;

	void (*status_cb)(struct mhi_controller *mhi_cntrl,
			  enum mhi_callback cb);
	void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
	void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
	void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
	int (*runtime_get)(struct mhi_controller *mhi_cntrl);
	void (*runtime_put)(struct mhi_controller *mhi_cntrl);
	int (*map_single)(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf);
	void (*unmap_single)(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf);
	int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
			u32 *out);
	void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
			  u32 val);
	void (*reset)(struct mhi_controller *mhi_cntrl);

	size_t buffer_len;
	int index;
	bool bounce_buf;
	bool fbc_download;
	bool wake_set;
	unsigned long irq_flags;
	u32 mru;
};
456
/**
 * struct mhi_device - Structure representing an MHI device which binds
 *                     to channels or is associated with controllers
 * @id: Pointer to MHI device ID struct
 * @name: Name of the associated MHI device
 * @mhi_cntrl: Controller the device belongs to
 * @ul_chan: UL channel for the device
 * @dl_chan: DL channel for the device
 * @dev: Driver model device node for the MHI device
 * @dev_type: MHI device type
 * @ul_chan_id: MHI channel id for UL transfer
 * @dl_chan_id: MHI channel id for DL transfer
 * @dev_wake: Device wakeup counter
 */
struct mhi_device {
	const struct mhi_device_id *id;
	const char *name;
	struct mhi_controller *mhi_cntrl;
	struct mhi_chan *ul_chan;
	struct mhi_chan *dl_chan;
	struct device dev;
	enum mhi_device_type dev_type;
	int ul_chan_id;
	int dl_chan_id;
	u32 dev_wake;
};
483
/**
 * struct mhi_result - Completed buffer information
 * @buf_addr: Address of data buffer
 * @bytes_xferd: # of bytes transferred
 * @dir: Channel direction
 * @transaction_status: Status of last transaction
 */
struct mhi_result {
	void *buf_addr;
	size_t bytes_xferd;
	enum dma_data_direction dir;
	int transaction_status;
};
497
/**
 * struct mhi_buf - MHI Buffer description
 * @buf: Virtual address of the buffer
 * @name: Buffer label. For offload channel, configurations name must be:
 *        ECA - Event context array data
 *        CCA - Channel context array data
 * @dma_addr: IOMMU address of the buffer
 * @len: # of bytes
 */
struct mhi_buf {
	void *buf;
	const char *name;
	dma_addr_t dma_addr;
	size_t len;
};
513
/**
 * struct mhi_driver - Structure representing a MHI client driver
 * @id_table: Pointer to MHI device ID table used for driver/device binding
 * @probe: CB function for client driver probe function
 * @remove: CB function for client driver remove function
 * @ul_xfer_cb: CB function for UL data transfer
 * @dl_xfer_cb: CB function for DL data transfer
 * @status_cb: CB functions for asynchronous status
 * @driver: Device driver model driver
 */
struct mhi_driver {
	const struct mhi_device_id *id_table;
	int (*probe)(struct mhi_device *mhi_dev,
		     const struct mhi_device_id *id);
	void (*remove)(struct mhi_device *mhi_dev);
	void (*ul_xfer_cb)(struct mhi_device *mhi_dev,
			   struct mhi_result *result);
	void (*dl_xfer_cb)(struct mhi_device *mhi_dev,
			   struct mhi_result *result);
	void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb);
	struct device_driver driver;
};
535
/* Recover the MHI wrapper structure from an embedded driver-model member */
#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
538
/**
 * mhi_alloc_controller - Allocate the MHI Controller structure
 * Allocate the mhi_controller structure using zero initialized memory
 */
struct mhi_controller *mhi_alloc_controller(void);

/**
 * mhi_free_controller - Free the MHI Controller structure
 * Free the mhi_controller structure which was previously allocated
 * by mhi_alloc_controller()
 */
void mhi_free_controller(struct mhi_controller *mhi_cntrl);

/**
 * mhi_register_controller - Register MHI controller
 * @mhi_cntrl: MHI controller to register
 * @config: Configuration to use for the controller
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config);

/**
 * mhi_unregister_controller - Unregister MHI controller
 * @mhi_cntrl: MHI controller to unregister
 */
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl);
564
/*
 * module_mhi_driver() - Helper macro for drivers that don't do
 * anything special other than using default mhi_driver_register() and
 * mhi_driver_unregister(). This eliminates a lot of boilerplate.
 * Each module may only use this macro once.
 */
#define module_mhi_driver(mhi_drv) \
	module_driver(mhi_drv, mhi_driver_register, \
		      mhi_driver_unregister)

/*
 * Macro to avoid include chaining to get THIS_MODULE
 */
#define mhi_driver_register(mhi_drv) \
	__mhi_driver_register(mhi_drv, THIS_MODULE)

/**
 * __mhi_driver_register - Register driver with MHI framework
 * @mhi_drv: Driver associated with the device
 * @owner: The module owner
 *
 * Return: 0 on success, a negative error code otherwise
 */
int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner);

/**
 * mhi_driver_unregister - Unregister a driver for mhi_devices
 * @mhi_drv: Driver associated with the device
 */
void mhi_driver_unregister(struct mhi_driver *mhi_drv);
593
/**
 * mhi_set_mhi_state - Set MHI device state
 * @mhi_cntrl: MHI controller
 * @state: State to set
 */
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
		       enum mhi_state state);

/**
 * mhi_notify - Notify the MHI client driver about client device status
 * @mhi_dev: MHI device instance
 * @cb_reason: MHI callback reason
 */
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);

/**
 * mhi_get_free_desc_count - Get transfer ring length
 * @mhi_dev: Device associated with the channels
 * @dir: Direction of the channel
 *
 * Get # of TD available to queue buffers
 */
int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
			    enum dma_data_direction dir);

/**
 * mhi_prepare_for_power_up - Do pre-initialization before power up.
 *                            This is optional, call this before power up if
 *                            the controller does not want bus framework to
 *                            automatically free any allocated memory during
 *                            shutdown process.
 * @mhi_cntrl: MHI controller
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
627
/**
 * mhi_async_power_up - Start MHI power up sequence
 * @mhi_cntrl: MHI controller
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_async_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_sync_power_up - Start MHI power up sequence and wait till the device
 *                     enters valid EE state
 * @mhi_cntrl: MHI controller
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_power_down - Start MHI power down sequence
 * @mhi_cntrl: MHI controller
 * @graceful: Link is still accessible, so do a graceful shutdown process
 */
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);

/**
 * mhi_unprepare_after_power_down - Free any allocated memory after power down
 * @mhi_cntrl: MHI controller
 */
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);

/**
 * mhi_pm_suspend - Move MHI into a suspended state
 * @mhi_cntrl: MHI controller
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);

/**
 * mhi_pm_resume - Resume MHI from suspended state
 * @mhi_cntrl: MHI controller
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
665
/**
 * mhi_download_rddm_image - Download ramdump image from device for
 *                           debugging purpose.
 * @mhi_cntrl: MHI controller
 * @in_panic: Download rddm image during kernel panic
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic);

/**
 * mhi_force_rddm_mode - Force device into rddm mode
 * @mhi_cntrl: MHI controller
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);

/**
 * mhi_get_exec_env - Get BHI execution environment of the device
 * @mhi_cntrl: MHI controller
 */
enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);

/**
 * mhi_get_mhi_state - Get MHI state of the device
 * @mhi_cntrl: MHI controller
 */
enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);

/**
 * mhi_soc_reset - Trigger a device reset. This can be used as a last resort
 *                 to reset and recover a device.
 * @mhi_cntrl: MHI controller
 */
void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
698
/**
 * mhi_device_get - Disable device low power mode
 * @mhi_dev: Device associated with the channel
 */
void mhi_device_get(struct mhi_device *mhi_dev);

/**
 * mhi_device_get_sync - Disable device low power mode. Synchronously
 *                       take the controller out of suspended state
 * @mhi_dev: Device associated with the channel
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_device_get_sync(struct mhi_device *mhi_dev);

/**
 * mhi_device_put - Re-enable device low power mode. Counterpart to
 *                  mhi_device_get() / mhi_device_get_sync()
 * @mhi_dev: Device associated with the channel
 */
void mhi_device_put(struct mhi_device *mhi_dev);

/**
 * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer.
 *                            Allocate and initialize the channel context and
 *                            also issue the START channel command to both
 *                            channels. Channels can be started only if both
 *                            host and device execution environments match and
 *                            channels are in a DISABLED state.
 * @mhi_dev: Device associated with the channels
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);

/**
 * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
 *                               Issue the RESET channel command and let the
 *                               device clean-up the context so no incoming
 *                               transfers are seen on the host. Free memory
 *                               associated with the context on host. If device
 *                               is unresponsive, only perform a host side
 *                               clean-up. Channels can be reset only if both
 *                               host and device execution environments match
 *                               and channels are in an ENABLED, STOPPED or
 *                               SUSPENDED state.
 * @mhi_dev: Device associated with the channels
 */
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);

/**
 * mhi_poll - Poll for any available data in DL direction
 * @mhi_dev: Device associated with the channels
 * @budget: # of events to process
 */
int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
750
/**
 * mhi_queue_dma - Send or receive DMA mapped buffers from client device
 *                 over MHI channel
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 * @mhi_buf: Buffer for holding the DMA mapped data
 * @len: Buffer length
 * @mflags: MHI transfer flags used for the transfer
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags);

/**
 * mhi_queue_buf - Send or receive raw buffers from client device over MHI
 *                 channel
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 * @buf: Buffer for holding the data
 * @len: Buffer length
 * @mflags: MHI transfer flags used for the transfer
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags);

/**
 * mhi_queue_skb - Send or receive SKBs from client device over MHI channel
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 * @skb: Buffer for holding SKBs
 * @len: Buffer length
 * @mflags: MHI transfer flags used for the transfer
 *
 * Return: 0 on success, a negative error code otherwise
 */
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags);

/**
 * mhi_queue_is_full - Determine whether queueing new elements is possible
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 */
bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir);
792
793 #endif /* _MHI_H_ */