/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
18 #include <linux/export.h>
19 #include <linux/pci.h>
20 #include <linux/kthread.h>
21 #include <linux/interrupt.h>
23 #include <linux/jiffies.h>
25 #include <linux/mei.h>
34 * mei_cl_complete_handler - processes completed operation for a client
36 * @cl: private data of the file object.
37 * @cb: callback block.
39 static void mei_cl_complete_handler(struct mei_cl
*cl
, struct mei_cl_cb
*cb
)
41 if (cb
->fop_type
== MEI_FOP_WRITE
) {
44 cl
->writing_state
= MEI_WRITE_COMPLETE
;
45 if (waitqueue_active(&cl
->tx_wait
))
46 wake_up_interruptible(&cl
->tx_wait
);
48 } else if (cb
->fop_type
== MEI_FOP_READ
&&
49 MEI_READING
== cl
->reading_state
) {
50 cl
->reading_state
= MEI_READ_COMPLETE
;
51 if (waitqueue_active(&cl
->rx_wait
))
52 wake_up_interruptible(&cl
->rx_wait
);
54 mei_cl_bus_rx_event(cl
);
60 * mei_irq_compl_handler - dispatch complete handelers
61 * for the completed callbacks
64 * @compl_list - list of completed cbs
66 void mei_irq_compl_handler(struct mei_device
*dev
, struct mei_cl_cb
*compl_list
)
68 struct mei_cl_cb
*cb
, *next
;
71 list_for_each_entry_safe(cb
, next
, &compl_list
->list
, list
) {
77 dev_dbg(&dev
->pdev
->dev
, "completing call back.\n");
78 if (cl
== &dev
->iamthif_cl
)
79 mei_amthif_complete(dev
, cb
);
81 mei_cl_complete_handler(cl
, cb
);
84 EXPORT_SYMBOL_GPL(mei_irq_compl_handler
);
86 * _mei_irq_thread_state_ok - checks if mei header matches file private data
88 * @cl: private data of the file object
89 * @mei_hdr: header of mei client message
91 * returns !=0 if matches, 0 if no match.
93 static int _mei_irq_thread_state_ok(struct mei_cl
*cl
,
94 struct mei_msg_hdr
*mei_hdr
)
96 return (cl
->host_client_id
== mei_hdr
->host_addr
&&
97 cl
->me_client_id
== mei_hdr
->me_addr
&&
98 cl
->state
== MEI_FILE_CONNECTED
&&
99 MEI_READ_COMPLETE
!= cl
->reading_state
);
103 * mei_irq_thread_read_client_message - bottom half read routine after ISR to
104 * handle the read mei client message data processing.
106 * @complete_list: An instance of our list structure
107 * @dev: the device structure
108 * @mei_hdr: header of mei client message
110 * returns 0 on success, <0 on failure.
112 static int mei_irq_thread_read_client_message(struct mei_cl_cb
*complete_list
,
113 struct mei_device
*dev
,
114 struct mei_msg_hdr
*mei_hdr
)
117 struct mei_cl_cb
*cb_pos
= NULL
, *cb_next
= NULL
;
118 unsigned char *buffer
= NULL
;
120 dev_dbg(&dev
->pdev
->dev
, "start client msg\n");
121 if (list_empty(&dev
->read_list
.list
))
124 list_for_each_entry_safe(cb_pos
, cb_next
, &dev
->read_list
.list
, list
) {
126 if (cl
&& _mei_irq_thread_state_ok(cl
, mei_hdr
)) {
127 cl
->reading_state
= MEI_READING
;
128 buffer
= cb_pos
->response_buffer
.data
+ cb_pos
->buf_idx
;
130 if (cb_pos
->response_buffer
.size
<
131 mei_hdr
->length
+ cb_pos
->buf_idx
) {
132 dev_dbg(&dev
->pdev
->dev
, "message overflow.\n");
133 list_del(&cb_pos
->list
);
137 mei_read_slots(dev
, buffer
, mei_hdr
->length
);
139 cb_pos
->buf_idx
+= mei_hdr
->length
;
140 if (mei_hdr
->msg_complete
) {
142 list_del(&cb_pos
->list
);
143 dev_dbg(&dev
->pdev
->dev
,
144 "completed read H cl = %d, ME cl = %d, length = %lu\n",
149 list_add_tail(&cb_pos
->list
,
150 &complete_list
->list
);
159 dev_dbg(&dev
->pdev
->dev
, "message read\n");
161 mei_read_slots(dev
, dev
->rd_msg_buf
, mei_hdr
->length
);
162 dev_dbg(&dev
->pdev
->dev
, "discarding message " MEI_HDR_FMT
"\n",
163 MEI_HDR_PRM(mei_hdr
));
170 * _mei_irq_thread_close - processes close related operation.
172 * @dev: the device structure.
173 * @slots: free slots.
174 * @cb_pos: callback block.
175 * @cl: private data of the file object.
176 * @cmpl_list: complete list.
178 * returns 0, OK; otherwise, error.
180 static int _mei_irq_thread_close(struct mei_device
*dev
, s32
*slots
,
181 struct mei_cl_cb
*cb_pos
,
183 struct mei_cl_cb
*cmpl_list
)
186 mei_data2slots(sizeof(struct hbm_client_connect_request
));
188 if (*slots
< msg_slots
)
193 if (mei_hbm_cl_disconnect_req(dev
, cl
)) {
196 list_move_tail(&cb_pos
->list
, &cmpl_list
->list
);
200 cl
->state
= MEI_FILE_DISCONNECTING
;
203 list_move_tail(&cb_pos
->list
, &dev
->ctrl_rd_list
.list
);
204 cl
->timer_count
= MEI_CONNECT_TIMEOUT
;
211 * _mei_hb_read - processes read related operation.
213 * @dev: the device structure.
214 * @slots: free slots.
215 * @cb_pos: callback block.
216 * @cl: private data of the file object.
217 * @cmpl_list: complete list.
219 * returns 0, OK; otherwise, error.
221 static int _mei_irq_thread_read(struct mei_device
*dev
, s32
*slots
,
222 struct mei_cl_cb
*cb_pos
,
224 struct mei_cl_cb
*cmpl_list
)
226 u32 msg_slots
= mei_data2slots(sizeof(struct hbm_flow_control
));
228 if (*slots
< msg_slots
) {
229 /* return the cancel routine */
230 list_del(&cb_pos
->list
);
236 if (mei_hbm_cl_flow_control_req(dev
, cl
)) {
237 cl
->status
= -ENODEV
;
239 list_move_tail(&cb_pos
->list
, &cmpl_list
->list
);
242 list_move_tail(&cb_pos
->list
, &dev
->read_list
.list
);
249 * _mei_irq_thread_ioctl - processes ioctl related operation.
251 * @dev: the device structure.
252 * @slots: free slots.
253 * @cb_pos: callback block.
254 * @cl: private data of the file object.
255 * @cmpl_list: complete list.
257 * returns 0, OK; otherwise, error.
259 static int _mei_irq_thread_ioctl(struct mei_device
*dev
, s32
*slots
,
260 struct mei_cl_cb
*cb_pos
,
262 struct mei_cl_cb
*cmpl_list
)
265 mei_data2slots(sizeof(struct hbm_client_connect_request
));
267 if (*slots
< msg_slots
) {
268 /* return the cancel routine */
269 list_del(&cb_pos
->list
);
275 cl
->state
= MEI_FILE_CONNECTING
;
277 if (mei_hbm_cl_connect_req(dev
, cl
)) {
278 cl
->status
= -ENODEV
;
280 list_del(&cb_pos
->list
);
283 list_move_tail(&cb_pos
->list
, &dev
->ctrl_rd_list
.list
);
284 cl
->timer_count
= MEI_CONNECT_TIMEOUT
;
290 * mei_irq_thread_write_complete - write messages to device.
292 * @dev: the device structure.
293 * @slots: free slots.
294 * @cb: callback block.
295 * @cmpl_list: complete list.
297 * returns 0, OK; otherwise, error.
299 static int mei_irq_thread_write_complete(struct mei_device
*dev
, s32
*slots
,
300 struct mei_cl_cb
*cb
, struct mei_cl_cb
*cmpl_list
)
302 struct mei_msg_hdr mei_hdr
;
303 struct mei_cl
*cl
= cb
->cl
;
304 size_t len
= cb
->request_buffer
.size
- cb
->buf_idx
;
305 u32 msg_slots
= mei_data2slots(len
);
307 mei_hdr
.host_addr
= cl
->host_client_id
;
308 mei_hdr
.me_addr
= cl
->me_client_id
;
309 mei_hdr
.reserved
= 0;
311 if (*slots
>= msg_slots
) {
312 mei_hdr
.length
= len
;
313 mei_hdr
.msg_complete
= 1;
314 /* Split the message only if we can write the whole host buffer */
315 } else if (*slots
== dev
->hbuf_depth
) {
317 len
= (*slots
* sizeof(u32
)) - sizeof(struct mei_msg_hdr
);
318 mei_hdr
.length
= len
;
319 mei_hdr
.msg_complete
= 0;
321 /* wait for next time the host buffer is empty */
325 dev_dbg(&dev
->pdev
->dev
, "buf: size = %d idx = %lu\n",
326 cb
->request_buffer
.size
, cb
->buf_idx
);
327 dev_dbg(&dev
->pdev
->dev
, MEI_HDR_FMT
, MEI_HDR_PRM(&mei_hdr
));
330 if (mei_write_message(dev
, &mei_hdr
,
331 cb
->request_buffer
.data
+ cb
->buf_idx
)) {
332 cl
->status
= -ENODEV
;
333 list_move_tail(&cb
->list
, &cmpl_list
->list
);
337 if (mei_cl_flow_ctrl_reduce(cl
))
341 cb
->buf_idx
+= mei_hdr
.length
;
342 if (mei_hdr
.msg_complete
)
343 list_move_tail(&cb
->list
, &dev
->write_waiting_list
.list
);
349 * mei_irq_thread_read_handler - bottom half read routine after ISR to
350 * handle the read processing.
352 * @dev: the device structure
353 * @cmpl_list: An instance of our list structure
354 * @slots: slots to read.
356 * returns 0 on success, <0 on failure.
358 int mei_irq_read_handler(struct mei_device
*dev
,
359 struct mei_cl_cb
*cmpl_list
, s32
*slots
)
361 struct mei_msg_hdr
*mei_hdr
;
362 struct mei_cl
*cl_pos
= NULL
;
363 struct mei_cl
*cl_next
= NULL
;
366 if (!dev
->rd_msg_hdr
) {
367 dev
->rd_msg_hdr
= mei_read_hdr(dev
);
368 dev_dbg(&dev
->pdev
->dev
, "slots =%08x.\n", *slots
);
370 dev_dbg(&dev
->pdev
->dev
, "slots =%08x.\n", *slots
);
372 mei_hdr
= (struct mei_msg_hdr
*) &dev
->rd_msg_hdr
;
373 dev_dbg(&dev
->pdev
->dev
, MEI_HDR_FMT
, MEI_HDR_PRM(mei_hdr
));
375 if (mei_hdr
->reserved
|| !dev
->rd_msg_hdr
) {
376 dev_dbg(&dev
->pdev
->dev
, "corrupted message header.\n");
381 if (mei_hdr
->host_addr
|| mei_hdr
->me_addr
) {
382 list_for_each_entry_safe(cl_pos
, cl_next
,
383 &dev
->file_list
, link
) {
384 dev_dbg(&dev
->pdev
->dev
,
385 "list_for_each_entry_safe read host"
386 " client = %d, ME client = %d\n",
387 cl_pos
->host_client_id
,
388 cl_pos
->me_client_id
);
389 if (cl_pos
->host_client_id
== mei_hdr
->host_addr
&&
390 cl_pos
->me_client_id
== mei_hdr
->me_addr
)
394 if (&cl_pos
->link
== &dev
->file_list
) {
395 dev_dbg(&dev
->pdev
->dev
, "corrupted message header\n");
400 if (((*slots
) * sizeof(u32
)) < mei_hdr
->length
) {
401 dev_dbg(&dev
->pdev
->dev
,
402 "we can't read the message slots =%08x.\n",
404 /* we can't read the message */
409 /* decide where to read the message too */
410 if (!mei_hdr
->host_addr
) {
411 dev_dbg(&dev
->pdev
->dev
, "call mei_irq_thread_read_bus_message.\n");
412 mei_hbm_dispatch(dev
, mei_hdr
);
413 dev_dbg(&dev
->pdev
->dev
, "end mei_irq_thread_read_bus_message.\n");
414 } else if (mei_hdr
->host_addr
== dev
->iamthif_cl
.host_client_id
&&
415 (MEI_FILE_CONNECTED
== dev
->iamthif_cl
.state
) &&
416 (dev
->iamthif_state
== MEI_IAMTHIF_READING
)) {
417 dev_dbg(&dev
->pdev
->dev
, "call mei_irq_thread_read_iamthif_message.\n");
419 dev_dbg(&dev
->pdev
->dev
, MEI_HDR_FMT
, MEI_HDR_PRM(mei_hdr
));
421 ret
= mei_amthif_irq_read_message(cmpl_list
, dev
, mei_hdr
);
425 dev_dbg(&dev
->pdev
->dev
, "call mei_irq_thread_read_client_message.\n");
426 ret
= mei_irq_thread_read_client_message(cmpl_list
,
433 /* reset the number of slots and header */
434 *slots
= mei_count_full_read_slots(dev
);
437 if (*slots
== -EOVERFLOW
) {
438 /* overflow - reset */
439 dev_dbg(&dev
->pdev
->dev
, "resetting due to slots overflow.\n");
440 /* set the event since message has been read */
447 EXPORT_SYMBOL_GPL(mei_irq_read_handler
);
451 * mei_irq_write_handler - dispatch write requests
454 * @dev: the device structure
455 * @cmpl_list: An instance of our list structure
457 * returns 0 on success, <0 on failure.
459 int mei_irq_write_handler(struct mei_device
*dev
, struct mei_cl_cb
*cmpl_list
)
463 struct mei_cl_cb
*pos
= NULL
, *next
= NULL
;
464 struct mei_cl_cb
*list
;
468 if (!mei_hbuf_is_ready(dev
)) {
469 dev_dbg(&dev
->pdev
->dev
, "host buffer is not empty.\n");
472 slots
= mei_hbuf_empty_slots(dev
);
476 /* complete all waiting for write CB */
477 dev_dbg(&dev
->pdev
->dev
, "complete all waiting for write cb.\n");
479 list
= &dev
->write_waiting_list
;
480 list_for_each_entry_safe(pos
, next
, &list
->list
, list
) {
486 list_del(&pos
->list
);
487 if (MEI_WRITING
== cl
->writing_state
&&
488 pos
->fop_type
== MEI_FOP_WRITE
&&
489 cl
!= &dev
->iamthif_cl
) {
490 dev_dbg(&dev
->pdev
->dev
, "MEI WRITE COMPLETE\n");
491 cl
->writing_state
= MEI_WRITE_COMPLETE
;
492 list_add_tail(&pos
->list
, &cmpl_list
->list
);
494 if (cl
== &dev
->iamthif_cl
) {
495 dev_dbg(&dev
->pdev
->dev
, "check iamthif flow control.\n");
496 if (dev
->iamthif_flow_control_pending
) {
497 ret
= mei_amthif_irq_read(dev
, &slots
);
504 if (dev
->wd_state
== MEI_WD_STOPPING
) {
505 dev
->wd_state
= MEI_WD_IDLE
;
506 wake_up_interruptible(&dev
->wait_stop_wd
);
509 if (dev
->wr_ext_msg
.hdr
.length
) {
510 mei_write_message(dev
, &dev
->wr_ext_msg
.hdr
,
511 dev
->wr_ext_msg
.data
);
512 slots
-= mei_data2slots(dev
->wr_ext_msg
.hdr
.length
);
513 dev
->wr_ext_msg
.hdr
.length
= 0;
515 if (dev
->dev_state
== MEI_DEV_ENABLED
) {
516 if (dev
->wd_pending
&&
517 mei_cl_flow_ctrl_creds(&dev
->wd_cl
) > 0) {
518 if (mei_wd_send(dev
))
519 dev_dbg(&dev
->pdev
->dev
, "wd send failed.\n");
520 else if (mei_cl_flow_ctrl_reduce(&dev
->wd_cl
))
523 dev
->wd_pending
= false;
525 if (dev
->wd_state
== MEI_WD_RUNNING
)
526 slots
-= mei_data2slots(MEI_WD_START_MSG_SIZE
);
528 slots
-= mei_data2slots(MEI_WD_STOP_MSG_SIZE
);
532 /* complete control write list CB */
533 dev_dbg(&dev
->pdev
->dev
, "complete control write list cb.\n");
534 list_for_each_entry_safe(pos
, next
, &dev
->ctrl_wr_list
.list
, list
) {
537 list_del(&pos
->list
);
540 switch (pos
->fop_type
) {
542 /* send disconnect message */
543 ret
= _mei_irq_thread_close(dev
, &slots
, pos
,
550 /* send flow control message */
551 ret
= _mei_irq_thread_read(dev
, &slots
, pos
,
558 /* connect message */
559 if (mei_cl_is_other_connecting(cl
))
561 ret
= _mei_irq_thread_ioctl(dev
, &slots
, pos
,
573 /* complete write list CB */
574 dev_dbg(&dev
->pdev
->dev
, "complete write list cb.\n");
575 list_for_each_entry_safe(pos
, next
, &dev
->write_list
.list
, list
) {
579 if (mei_cl_flow_ctrl_creds(cl
) <= 0) {
580 dev_dbg(&dev
->pdev
->dev
,
581 "No flow control credentials for client %d, not sending.\n",
586 if (cl
== &dev
->iamthif_cl
)
587 ret
= mei_amthif_irq_write_complete(dev
, &slots
,
590 ret
= mei_irq_thread_write_complete(dev
, &slots
, pos
,
598 EXPORT_SYMBOL_GPL(mei_irq_write_handler
);
603 * mei_timer - timer function.
605 * @work: pointer to the work_struct structure
607 * NOTE: This function is called by timer interrupt work
609 void mei_timer(struct work_struct
*work
)
611 unsigned long timeout
;
612 struct mei_cl
*cl_pos
= NULL
;
613 struct mei_cl
*cl_next
= NULL
;
614 struct mei_cl_cb
*cb_pos
= NULL
;
615 struct mei_cl_cb
*cb_next
= NULL
;
617 struct mei_device
*dev
= container_of(work
,
618 struct mei_device
, timer_work
.work
);
621 mutex_lock(&dev
->device_lock
);
622 if (dev
->dev_state
!= MEI_DEV_ENABLED
) {
623 if (dev
->dev_state
== MEI_DEV_INIT_CLIENTS
) {
624 if (dev
->init_clients_timer
) {
625 if (--dev
->init_clients_timer
== 0) {
626 dev_dbg(&dev
->pdev
->dev
, "IMEI reset due to init clients timeout ,init clients state = %d.\n",
627 dev
->init_clients_state
);
634 /*** connect/disconnect timeouts ***/
635 list_for_each_entry_safe(cl_pos
, cl_next
, &dev
->file_list
, link
) {
636 if (cl_pos
->timer_count
) {
637 if (--cl_pos
->timer_count
== 0) {
638 dev_dbg(&dev
->pdev
->dev
, "HECI reset due to connect/disconnect timeout.\n");
645 if (dev
->iamthif_stall_timer
) {
646 if (--dev
->iamthif_stall_timer
== 0) {
647 dev_dbg(&dev
->pdev
->dev
, "resetting because of hang to amthi.\n");
649 dev
->iamthif_msg_buf_size
= 0;
650 dev
->iamthif_msg_buf_index
= 0;
651 dev
->iamthif_canceled
= false;
652 dev
->iamthif_ioctl
= true;
653 dev
->iamthif_state
= MEI_IAMTHIF_IDLE
;
654 dev
->iamthif_timer
= 0;
656 mei_io_cb_free(dev
->iamthif_current_cb
);
657 dev
->iamthif_current_cb
= NULL
;
659 dev
->iamthif_file_object
= NULL
;
660 mei_amthif_run_next_cmd(dev
);
664 if (dev
->iamthif_timer
) {
666 timeout
= dev
->iamthif_timer
+
667 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER
);
669 dev_dbg(&dev
->pdev
->dev
, "dev->iamthif_timer = %ld\n",
671 dev_dbg(&dev
->pdev
->dev
, "timeout = %ld\n", timeout
);
672 dev_dbg(&dev
->pdev
->dev
, "jiffies = %ld\n", jiffies
);
673 if (time_after(jiffies
, timeout
)) {
675 * User didn't read the AMTHI data on time (15sec)
676 * freeing AMTHI for other requests
679 dev_dbg(&dev
->pdev
->dev
, "freeing AMTHI for other requests\n");
681 list_for_each_entry_safe(cb_pos
, cb_next
,
682 &dev
->amthif_rd_complete_list
.list
, list
) {
684 cl_pos
= cb_pos
->file_object
->private_data
;
686 /* Finding the AMTHI entry. */
687 if (cl_pos
== &dev
->iamthif_cl
)
688 list_del(&cb_pos
->list
);
690 mei_io_cb_free(dev
->iamthif_current_cb
);
691 dev
->iamthif_current_cb
= NULL
;
693 dev
->iamthif_file_object
->private_data
= NULL
;
694 dev
->iamthif_file_object
= NULL
;
695 dev
->iamthif_timer
= 0;
696 mei_amthif_run_next_cmd(dev
);
701 schedule_delayed_work(&dev
->timer_work
, 2 * HZ
);
702 mutex_unlock(&dev
->device_lock
);