/* drivers/misc/mei/interrupt.c - Intel MEI interrupt bottom-half handlers */
1 /*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17
18 #include <linux/export.h>
19 #include <linux/kthread.h>
20 #include <linux/interrupt.h>
21 #include <linux/fs.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24
25 #include <linux/mei.h>
26
27 #include "mei_dev.h"
28 #include "hbm.h"
29 #include "client.h"
30
31
32 /**
33 * mei_irq_compl_handler - dispatch complete handlers
34 * for the completed callbacks
35 *
36 * @dev: mei device
37 * @compl_list: list of completed cbs
38 */
39 void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
40 {
41 struct mei_cl_cb *cb, *next;
42 struct mei_cl *cl;
43
44 list_for_each_entry_safe(cb, next, &compl_list->list, list) {
45 cl = cb->cl;
46 list_del(&cb->list);
47
48 dev_dbg(dev->dev, "completing call back.\n");
49 if (cl == &dev->iamthif_cl)
50 mei_amthif_complete(dev, cb);
51 else
52 mei_cl_complete(cl, cb);
53 }
54 }
55 EXPORT_SYMBOL_GPL(mei_irq_compl_handler);
56
57 /**
58 * mei_cl_hbm_equal - check if hbm is addressed to the client
59 *
60 * @cl: host client
61 * @mei_hdr: header of mei client message
62 *
63 * Return: true if matches, false otherwise
64 */
65 static inline int mei_cl_hbm_equal(struct mei_cl *cl,
66 struct mei_msg_hdr *mei_hdr)
67 {
68 return cl->host_client_id == mei_hdr->host_addr &&
69 cl->me_client_id == mei_hdr->me_addr;
70 }
71 /**
72 * mei_cl_is_reading - checks if the client
73 * is the one to read this message
74 *
75 * @cl: mei client
76 * @mei_hdr: header of mei message
77 *
78 * Return: true on match and false otherwise
79 */
80 static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr)
81 {
82 return mei_cl_hbm_equal(cl, mei_hdr) &&
83 cl->state == MEI_FILE_CONNECTED &&
84 cl->reading_state != MEI_READ_COMPLETE;
85 }
86
87 /**
88 * mei_cl_irq_read_msg - process client message
89 *
90 * @dev: the device structure
91 * @mei_hdr: header of mei client message
92 * @complete_list: An instance of our list structure
93 *
94 * Return: 0 on success, <0 on failure.
95 */
96 static int mei_cl_irq_read_msg(struct mei_device *dev,
97 struct mei_msg_hdr *mei_hdr,
98 struct mei_cl_cb *complete_list)
99 {
100 struct mei_cl *cl;
101 struct mei_cl_cb *cb, *next;
102 unsigned char *buffer = NULL;
103
104 list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
105 cl = cb->cl;
106 if (!mei_cl_is_reading(cl, mei_hdr))
107 continue;
108
109 cl->reading_state = MEI_READING;
110
111 if (cb->response_buffer.size == 0 ||
112 cb->response_buffer.data == NULL) {
113 cl_err(dev, cl, "response buffer is not allocated.\n");
114 list_del(&cb->list);
115 return -ENOMEM;
116 }
117
118 if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
119 cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
120 cb->response_buffer.size,
121 mei_hdr->length, cb->buf_idx);
122 buffer = krealloc(cb->response_buffer.data,
123 mei_hdr->length + cb->buf_idx,
124 GFP_KERNEL);
125
126 if (!buffer) {
127 list_del(&cb->list);
128 return -ENOMEM;
129 }
130 cb->response_buffer.data = buffer;
131 cb->response_buffer.size =
132 mei_hdr->length + cb->buf_idx;
133 }
134
135 buffer = cb->response_buffer.data + cb->buf_idx;
136 mei_read_slots(dev, buffer, mei_hdr->length);
137
138 cb->buf_idx += mei_hdr->length;
139 if (mei_hdr->msg_complete) {
140 cl->status = 0;
141 list_del(&cb->list);
142 cl_dbg(dev, cl, "completed read length = %lu\n",
143 cb->buf_idx);
144 list_add_tail(&cb->list, &complete_list->list);
145 }
146 break;
147 }
148
149 dev_dbg(dev->dev, "message read\n");
150 if (!buffer) {
151 mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
152 dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
153 MEI_HDR_PRM(mei_hdr));
154 }
155
156 return 0;
157 }
158
159 /**
160 * mei_cl_irq_disconnect_rsp - send disconnection response message
161 *
162 * @cl: client
163 * @cb: callback block.
164 * @cmpl_list: complete list.
165 *
166 * Return: 0, OK; otherwise, error.
167 */
168 static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
169 struct mei_cl_cb *cmpl_list)
170 {
171 struct mei_device *dev = cl->dev;
172 u32 msg_slots;
173 int slots;
174 int ret;
175
176 slots = mei_hbuf_empty_slots(dev);
177 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));
178
179 if (slots < msg_slots)
180 return -EMSGSIZE;
181
182 ret = mei_hbm_cl_disconnect_rsp(dev, cl);
183
184 cl->state = MEI_FILE_DISCONNECTED;
185 cl->status = 0;
186 list_del(&cb->list);
187 mei_io_cb_free(cb);
188
189 return ret;
190 }
191
192
193
194 /**
195 * mei_cl_irq_disconnect - processes close related operation from
196 * interrupt thread context - send disconnect request
197 *
198 * @cl: client
199 * @cb: callback block.
200 * @cmpl_list: complete list.
201 *
202 * Return: 0, OK; otherwise, error.
203 */
204 static int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
205 struct mei_cl_cb *cmpl_list)
206 {
207 struct mei_device *dev = cl->dev;
208 u32 msg_slots;
209 int slots;
210
211 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
212 slots = mei_hbuf_empty_slots(dev);
213
214 if (slots < msg_slots)
215 return -EMSGSIZE;
216
217 if (mei_hbm_cl_disconnect_req(dev, cl)) {
218 cl->status = 0;
219 cb->buf_idx = 0;
220 list_move_tail(&cb->list, &cmpl_list->list);
221 return -EIO;
222 }
223
224 cl->state = MEI_FILE_DISCONNECTING;
225 cl->status = 0;
226 cb->buf_idx = 0;
227 list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
228 cl->timer_count = MEI_CONNECT_TIMEOUT;
229
230 return 0;
231 }
232
233
234 /**
235 * mei_cl_irq_read - processes client read related operation from the
236 * interrupt thread context - request for flow control credits
237 *
238 * @cl: client
239 * @cb: callback block.
240 * @cmpl_list: complete list.
241 *
242 * Return: 0, OK; otherwise, error.
243 */
244 static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
245 struct mei_cl_cb *cmpl_list)
246 {
247 struct mei_device *dev = cl->dev;
248 u32 msg_slots;
249 int slots;
250 int ret;
251
252 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
253 slots = mei_hbuf_empty_slots(dev);
254
255 if (slots < msg_slots)
256 return -EMSGSIZE;
257
258 ret = mei_hbm_cl_flow_control_req(dev, cl);
259 if (ret) {
260 cl->status = ret;
261 cb->buf_idx = 0;
262 list_move_tail(&cb->list, &cmpl_list->list);
263 return ret;
264 }
265
266 list_move_tail(&cb->list, &dev->read_list.list);
267
268 return 0;
269 }
270
271
272 /**
273 * mei_cl_irq_connect - send connect request in irq_thread context
274 *
275 * @cl: client
276 * @cb: callback block.
277 * @cmpl_list: complete list.
278 *
279 * Return: 0, OK; otherwise, error.
280 */
281 static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
282 struct mei_cl_cb *cmpl_list)
283 {
284 struct mei_device *dev = cl->dev;
285 u32 msg_slots;
286 int slots;
287 int ret;
288
289 msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
290 slots = mei_hbuf_empty_slots(dev);
291
292 if (mei_cl_is_other_connecting(cl))
293 return 0;
294
295 if (slots < msg_slots)
296 return -EMSGSIZE;
297
298 cl->state = MEI_FILE_CONNECTING;
299
300 ret = mei_hbm_cl_connect_req(dev, cl);
301 if (ret) {
302 cl->status = ret;
303 cb->buf_idx = 0;
304 list_del(&cb->list);
305 return ret;
306 }
307
308 list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
309 cl->timer_count = MEI_CONNECT_TIMEOUT;
310 return 0;
311 }
312
313
/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * Reads one message header (latched in dev->rd_msg_hdr across calls),
 * validates it, and dispatches the payload either to the HBM layer
 * (host address 0 / me address 0), the amthif client, or the regular
 * client whose addresses match the header.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
		struct mei_cl_cb *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;

	/* the header is read only once per message; it stays cached in
	 * dev->rd_msg_hdr until the whole message has been consumed */
	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);
	}
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	/* a set reserved field or an all-zero header means corruption */
	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_err(dev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	/* the full payload must already be present in the hw buffer */
	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
				*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	/* HBM message */
	if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
					ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	if (&cl->link == &dev->file_list) {
		dev_err(dev->dev, "no destination client found 0x%08X\n",
				dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	/* messages for the amthif client in its READING phase take a
	 * dedicated path; everything else goes through the generic one */
	if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
	    MEI_FILE_CONNECTED == dev->iamthif_cl.state &&
	    dev->iamthif_state == MEI_IAMTHIF_READING) {

		ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret) {
			dev_err(dev->dev, "mei_amthif_irq_read_msg failed = %d\n",
					ret);
			goto end;
		}
	} else {
		ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret) {
			dev_err(dev->dev, "mei_cl_irq_read_msg failed = %d\n",
					ret);
			goto end;
		}
	}

reset_slots:
	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);
416
417
/**
 * mei_irq_write_handler - dispatch write requests
 * after irq received
 *
 * Walks the pending write-related lists while there is room in the
 * host (write) buffer: completes finished writes, services the
 * watchdog and amthif clients, sends queued control messages
 * (connect/disconnect/flow-control), and finally pushes queued data
 * writes.  Any helper failure aborts the pass and is propagated.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;


	/* nothing to do unless we own the host buffer */
	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(cb, next, &list->list, list) {
		cl = cb->cl;

		cl->status = 0;
		list_del(&cb->list);
		/* NOTE(review): a cb that is neither a MEI_FOP_WRITE nor
		 * owned by the amthif client is unlinked here without being
		 * completed or freed - verify no such cb can reach this list */
		if (cb->fop_type == MEI_FOP_WRITE &&
		    cl != &dev->iamthif_cl) {
			cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
			cl->writing_state = MEI_WRITE_COMPLETE;
			list_add_tail(&cb->list, &cmpl_list->list);
		}
		if (cl == &dev->iamthif_cl) {
			cl_dbg(dev, cl, "check iamthif flow control.\n");
			if (dev->iamthif_flow_control_pending) {
				ret = mei_amthif_irq_read(dev, &slots);
				if (ret)
					return ret;
			}
		}
	}

	/* acknowledge a pending watchdog stop request */
	if (dev->wd_state == MEI_WD_STOPPING) {
		dev->wd_state = MEI_WD_IDLE;
		wake_up(&dev->wait_stop_wd);
	}

	/* send a deferred watchdog message once credits are available */
	if (mei_cl_is_connected(&dev->wd_cl)) {
		if (dev->wd_pending &&
		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
			ret = mei_wd_send(dev);
			if (ret)
				return ret;
			dev->wd_pending = false;
		}
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
		cl = cb->cl;
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			/* any other fop_type on this list is a driver bug */
			BUG();
		}

	}
	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
		cl = cb->cl;
		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write(cl, cb, cmpl_list);
		else
			ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
535
536
537
/**
 * mei_timer - timer function.
 *
 * Periodic watchdog work (rescheduled every 2*HZ): decrements the
 * HBM-init, per-client connect/disconnect, and amthif stall counters,
 * resetting the device when any of them expires; also frees the amthif
 * channel when userspace fails to read its data before the read timer
 * elapses.
 *
 * @work: pointer to the work_struct structure
 *
 */
void mei_timer(struct work_struct *work)
{
	unsigned long timeout;
	struct mei_cl *cl;

	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);


	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
		}
	}

	/* the remaining checks only apply to a fully enabled device */
	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_reset(dev);
				goto out;
			}
		}
	}

	if (!mei_cl_is_connected(&dev->iamthif_cl))
		goto out;

	/* amthif command stalled in firmware: reset and restart the queue */
	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_err(dev->dev, "timer: amthif hanged.\n");
			mei_reset(dev);
			dev->iamthif_msg_buf_size = 0;
			dev->iamthif_msg_buf_index = 0;
			dev->iamthif_canceled = false;
			dev->iamthif_ioctl = true;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
			dev->iamthif_timer = 0;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object = NULL;
			mei_amthif_run_next_cmd(dev);
		}
	}

	/* iamthif_timer is the jiffies stamp of the last completed
	 * amthif read; nonzero means data is waiting for userspace */
	if (dev->iamthif_timer) {

		timeout = dev->iamthif_timer +
			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

		dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
				dev->iamthif_timer);
		dev_dbg(dev->dev, "timeout = %ld\n", timeout);
		dev_dbg(dev->dev, "jiffies = %ld\n", jiffies);
		if (time_after(jiffies, timeout)) {
			/*
			 * User didn't read the AMTHI data on time (15sec)
			 * freeing AMTHI for other requests
			 */

			dev_dbg(dev->dev, "freeing AMTHI for other requests\n");

			mei_io_list_flush(&dev->amthif_rd_complete_list,
					&dev->iamthif_cl);
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object->private_data = NULL;
			dev->iamthif_file_object = NULL;
			dev->iamthif_timer = 0;
			mei_amthif_run_next_cmd(dev);

		}
	}
out:
	/* keep the watchdog running until the device is disabled */
	if (dev->dev_state != MEI_DEV_DISABLED)
		schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);
}
639