1 /*
2 * cec-adap.c - HDMI Consumer Electronics Control framework - CEC adapter
3 *
4 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
5 *
6 * This program is free software; you may redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
12 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
13 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
14 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
15 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
17 * SOFTWARE.
18 */
19
20 #include <linux/errno.h>
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/kmod.h>
25 #include <linux/ktime.h>
26 #include <linux/slab.h>
27 #include <linux/mm.h>
28 #include <linux/string.h>
29 #include <linux/types.h>
30
31 #include "cec-priv.h"
32
33 static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
34 static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
35
36 /*
37 * 400 ms is the time it takes for one 16 byte message to be
38 * transferred and 5 is the maximum number of retries. Add
39 * another 100 ms as a margin. So if the transmit doesn't
40 * finish before that time something is really wrong and we
41 * have to time out.
42 *
43  * This is a sign that something is really wrong and a warning
44 * will be issued.
45 */
46 #define CEC_XFER_TIMEOUT_MS (5 * 400 + 100)
47
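/*
 * Helper macros to call an optional driver callback: call_op() returns 0
 * if the callback is not implemented, and call_void_op() simply does
 * nothing in that case.
 */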
48 #define call_op(adap, op, arg...) \
49 (adap->ops->op ? adap->ops->op(adap, ## arg) : 0)
50
51 #define call_void_op(adap, op, arg...) \
52 do { \
53 if (adap->ops->op) \
54 adap->ops->op(adap, ## arg); \
55 } while (0)
56
57 static int cec_log_addr2idx(const struct cec_adapter *adap, u8 log_addr)
58 {
59 int i;
60
61 for (i = 0; i < adap->log_addrs.num_log_addrs; i++)
62 if (adap->log_addrs.log_addr[i] == log_addr)
63 return i;
64 return -1;
65 }
66
67 static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr)
68 {
69 int i = cec_log_addr2idx(adap, log_addr);
70
71 return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
72 }
73
74 /*
75 * Queue a new event for this filehandle. If ts == 0, then set it
76 * to the current time.
77 *
78 * The two events that are currently defined do not need to keep track
79 * of intermediate events, so no actual queue of events is needed,
80 * instead just store the latest state and the total number of lost
81 * messages.
82 *
83 * Should new events be added in the future that require intermediate
84 * results to be queued as well, then a proper queue data structure is
85 * required. But until then, just keep it simple.
86 */
87 void cec_queue_event_fh(struct cec_fh *fh,
88 const struct cec_event *new_ev, u64 ts)
89 {
90 struct cec_event *ev = &fh->events[new_ev->event - 1];
91
92 if (ts == 0)
93 ts = ktime_get_ns();
94
95 mutex_lock(&fh->lock);
96 if (new_ev->event == CEC_EVENT_LOST_MSGS &&
97 fh->pending_events & (1 << new_ev->event)) {
98 /*
99 * If there is already a lost_msgs event, then just
100 * update the lost_msgs count. This effectively
101 * merges the old and new events into one.
102 */
103 ev->lost_msgs.lost_msgs += new_ev->lost_msgs.lost_msgs;
104 goto unlock;
105 }
106
107 /*
108 * Intermediate states are not interesting, so just
109 * overwrite any older event.
110 */
111 *ev = *new_ev;
112 ev->ts = ts;
113 fh->pending_events |= 1 << new_ev->event;
114
115 unlock:
116 mutex_unlock(&fh->lock);
117 wake_up_interruptible(&fh->wait);
118 }
119
120 /* Queue a new event for all open filehandles. */
121 static void cec_queue_event(struct cec_adapter *adap,
122 const struct cec_event *ev)
123 {
124 u64 ts = ktime_get_ns();
125 struct cec_fh *fh;
126
127 mutex_lock(&adap->devnode.lock);
128 list_for_each_entry(fh, &adap->devnode.fhs, list)
129 cec_queue_event_fh(fh, ev, ts);
130 mutex_unlock(&adap->devnode.lock);
131 }
132
133 /*
134 * Queue a new message for this filehandle. If there is no more room
135 * in the queue, then send the LOST_MSGS event instead.
136 */
137 static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg)
138 {
139 static const struct cec_event ev_lost_msg = {
140 .ts = 0,
141 .event = CEC_EVENT_LOST_MSGS,
142 .flags = 0,
143 {
144 .lost_msgs.lost_msgs = 1,
145 },
146 };
147 struct cec_msg_entry *entry;
148
149 mutex_lock(&fh->lock);
150 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
151 if (!entry)
152 goto lost_msgs;
153
154 entry->msg = *msg;
155 /* Add new msg at the end of the queue */
156 list_add_tail(&entry->list, &fh->msgs);
157
158 /*
159  * If the queue is full (CEC_MAX_MSG_RX_QUEUE_SZ messages), then drop
160  * the message we just added and send a lost message event instead.
161 */
162 if (fh->queued_msgs == CEC_MAX_MSG_RX_QUEUE_SZ) {
163 list_del(&entry->list);
164 goto lost_msgs;
165 }
166 fh->queued_msgs++;
167 mutex_unlock(&fh->lock);
168 wake_up_interruptible(&fh->wait);
169 return;
170
171 lost_msgs:
172 mutex_unlock(&fh->lock);
173 cec_queue_event_fh(fh, &ev_lost_msg, 0);
174 }
175
176 /*
177 * Queue the message for those filehandles that are in monitor mode.
178 * If valid_la is true (this message is for us or was sent by us),
179 * then pass it on to any monitoring filehandle. If this message
180 * isn't for us or from us, then only give it to filehandles that
181 * are in MONITOR_ALL mode.
182 *
183  * A filehandle can only be in MONITOR_ALL mode if the CEC_CAP_MONITOR_ALL
184  * capability is set and the CEC adapter was placed in 'monitor all' mode.
185 */
186 static void cec_queue_msg_monitor(struct cec_adapter *adap,
187 const struct cec_msg *msg,
188 bool valid_la)
189 {
190 struct cec_fh *fh;
191 u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
192 CEC_MODE_MONITOR_ALL;
193
194 mutex_lock(&adap->devnode.lock);
195 list_for_each_entry(fh, &adap->devnode.fhs, list) {
196 if (fh->mode_follower >= monitor_mode)
197 cec_queue_msg_fh(fh, msg);
198 }
199 mutex_unlock(&adap->devnode.lock);
200 }
201
202 /*
203 * Queue the message for follower filehandles.
204 */
205 static void cec_queue_msg_followers(struct cec_adapter *adap,
206 const struct cec_msg *msg)
207 {
208 struct cec_fh *fh;
209
210 mutex_lock(&adap->devnode.lock);
211 list_for_each_entry(fh, &adap->devnode.fhs, list) {
212 if (fh->mode_follower == CEC_MODE_FOLLOWER)
213 cec_queue_msg_fh(fh, msg);
214 }
215 mutex_unlock(&adap->devnode.lock);
216 }
217
218 /* Notify userspace of an adapter state change. */
219 static void cec_post_state_event(struct cec_adapter *adap)
220 {
221 struct cec_event ev = {
222 .event = CEC_EVENT_STATE_CHANGE,
223 };
224
225 ev.state_change.phys_addr = adap->phys_addr;
226 ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
227 cec_queue_event(adap, &ev);
228 }
229
230 /*
231 * A CEC transmit (and a possible wait for reply) completed.
232 * If this was in blocking mode, then complete it, otherwise
233 * queue the message for userspace to dequeue later.
234 *
235 * This function is called with adap->lock held.
236 */
237 static void cec_data_completed(struct cec_data *data)
238 {
239 /*
240 * Delete this transmit from the filehandle's xfer_list since
241 * we're done with it.
242 *
243 * Note that if the filehandle is closed before this transmit
244  * finishes, then the release() function will set data->fh to NULL.
245 * Without that we would be referring to a closed filehandle.
246 */
247 if (data->fh)
248 list_del(&data->xfer_list);
249
250 if (data->blocking) {
251 /*
252 * Someone is blocking so mark the message as completed
253 * and call complete.
254 */
255 data->completed = true;
256 complete(&data->c);
257 } else {
258 /*
259 * No blocking, so just queue the message if needed and
260 * free the memory.
261 */
262 if (data->fh)
263 cec_queue_msg_fh(data->fh, &data->msg);
264 kfree(data);
265 }
266 }
267
268 /*
269 * A pending CEC transmit needs to be cancelled, either because the CEC
270 * adapter is disabled or the transmit takes an impossibly long time to
271 * finish.
272 *
273 * This function is called with adap->lock held.
274 */
275 static void cec_data_cancel(struct cec_data *data)
276 {
277 /*
278 * It's either the current transmit, or it is a pending
279 * transmit. Take the appropriate action to clear it.
280 */
281 if (data->adap->transmitting == data) {
282 data->adap->transmitting = NULL;
283 } else {
284 list_del_init(&data->list);
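/*
 * Entries on the wait_queue were already transmitted successfully
 * (tx_status has CEC_TX_STATUS_OK set), so they are no longer
 * counted in transmit_queue_sz.
 */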
285 if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
286 data->adap->transmit_queue_sz--;
287 }
288
289 /* Mark it as an error */
290 data->msg.tx_ts = ktime_get_ns();
291 data->msg.tx_status = CEC_TX_STATUS_ERROR |
292 CEC_TX_STATUS_MAX_RETRIES;
293 data->attempts = 0;
294 data->msg.tx_error_cnt = 1;
295 /* Queue transmitted message for monitoring purposes */
296 cec_queue_msg_monitor(data->adap, &data->msg, 1);
297
298 cec_data_completed(data);
299 }
300
301 /*
302 * Main CEC state machine
303 *
304 * Wait until the thread should be stopped, or we are not transmitting and
305 * a new transmit message is queued up, in which case we start transmitting
306  * that message. When the adapter has finished transmitting the message,
307  * it will call cec_transmit_done().
308 *
309 * If the adapter is disabled, then remove all queued messages instead.
310 *
311 * If the current transmit times out, then cancel that transmit.
312 */
313 int cec_thread_func(void *_adap)
314 {
315 struct cec_adapter *adap = _adap;
316
317 for (;;) {
318 unsigned int signal_free_time;
319 struct cec_data *data;
320 bool timeout = false;
321 u8 attempts;
322
323 if (adap->transmitting) {
324 int err;
325
326 /*
327 * We are transmitting a message, so add a timeout
328  * to prevent the state machine from getting stuck waiting
329  * for this message to finish, and add a check to
330  * see if the adapter is disabled, in which case the
331 * transmit should be canceled.
332 */
333 err = wait_event_interruptible_timeout(adap->kthread_waitq,
334 kthread_should_stop() ||
335 (!adap->is_configured && !adap->is_configuring) ||
336 (!adap->transmitting &&
337 !list_empty(&adap->transmit_queue)),
338 msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
339 timeout = err == 0;
340 } else {
341 /* Otherwise we just wait for something to happen. */
342 wait_event_interruptible(adap->kthread_waitq,
343 kthread_should_stop() ||
344 (!adap->transmitting &&
345 !list_empty(&adap->transmit_queue)));
346 }
347
348 mutex_lock(&adap->lock);
349
350 if ((!adap->is_configured && !adap->is_configuring) ||
351 kthread_should_stop()) {
352 /*
353 * If the adapter is disabled, or we're asked to stop,
354 * then cancel any pending transmits.
355 */
356 while (!list_empty(&adap->transmit_queue)) {
357 data = list_first_entry(&adap->transmit_queue,
358 struct cec_data, list);
359 cec_data_cancel(data);
360 }
361 if (adap->transmitting)
362 cec_data_cancel(adap->transmitting);
363
364 /*
365 * Cancel the pending timeout work. We have to unlock
366 * the mutex when flushing the work since
367 * cec_wait_timeout() will take it. This is OK since
368 * no new entries can be added to wait_queue as long
369 * as adap->transmitting is NULL, which it is due to
370 * the cec_data_cancel() above.
371 */
372 while (!list_empty(&adap->wait_queue)) {
373 data = list_first_entry(&adap->wait_queue,
374 struct cec_data, list);
375
376 if (!cancel_delayed_work(&data->work)) {
377 mutex_unlock(&adap->lock);
378 flush_scheduled_work();
379 mutex_lock(&adap->lock);
380 }
381 cec_data_cancel(data);
382 }
383 goto unlock;
384 }
385
386 if (adap->transmitting && timeout) {
387 /*
388 * If we timeout, then log that. This really shouldn't
389 * happen and is an indication of a faulty CEC adapter
390  * driver, or of the CEC bus being in some weird state.
391 */
392 dprintk(0, "message %*ph timed out!\n",
393 adap->transmitting->msg.len,
394 adap->transmitting->msg.msg);
395 /* Just give up on this. */
396 cec_data_cancel(adap->transmitting);
397 goto unlock;
398 }
399
400 /*
401 * If we are still transmitting, or there is nothing new to
402 * transmit, then just continue waiting.
403 */
404 if (adap->transmitting || list_empty(&adap->transmit_queue))
405 goto unlock;
406
407 /* Get a new message to transmit */
408 data = list_first_entry(&adap->transmit_queue,
409 struct cec_data, list);
410 list_del_init(&data->list);
411 adap->transmit_queue_sz--;
412 /* Make this the current transmitting message */
413 adap->transmitting = data;
414
415 /*
416 * Suggested number of attempts as per the CEC 2.0 spec:
417 * 4 attempts is the default, except for 'secondary poll
418 * messages', i.e. poll messages not sent during the adapter
419 * configuration phase when it allocates logical addresses.
420 */
421 if (data->msg.len == 1 && adap->is_configured)
422 attempts = 2;
423 else
424 attempts = 4;
425
426 /* Set the suggested signal free time */
427 if (data->attempts) {
428 /* should be >= 3 data bit periods for a retry */
429 signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY;
430 } else if (data->new_initiator) {
431 /* should be >= 5 data bit periods for new initiator */
432 signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
433 } else {
434 /*
435 * should be >= 7 data bit periods for sending another
436 * frame immediately after another.
437 */
438 signal_free_time = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
439 }
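/*
 * Only set the number of attempts for a fresh transmit; a retry keeps
 * whatever attempt count it has left.
 */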
440 if (data->attempts == 0)
441 data->attempts = attempts;
442
443 /* Tell the adapter to transmit, cancel on error */
444 if (adap->ops->adap_transmit(adap, data->attempts,
445 signal_free_time, &data->msg))
446 cec_data_cancel(data);
447
448 unlock:
449 mutex_unlock(&adap->lock);
450
451 if (kthread_should_stop())
452 break;
453 }
454 return 0;
455 }
456
457 /*
458 * Called by the CEC adapter if a transmit finished.
459 */
460 void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
461 u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt)
462 {
463 struct cec_data *data;
464 struct cec_msg *msg;
465 u64 ts = ktime_get_ns();
466
467 dprintk(2, "cec_transmit_done %02x\n", status);
468 mutex_lock(&adap->lock);
469 data = adap->transmitting;
470 if (!data) {
471 /*
472 * This can happen if a transmit was issued and the cable is
473 * unplugged while the transmit is ongoing. Ignore this
474 * transmit in that case.
475 */
476 dprintk(1, "cec_transmit_done without an ongoing transmit!\n");
477 goto unlock;
478 }
479
480 msg = &data->msg;
481
482 /* Drivers must fill in the status! */
483 WARN_ON(status == 0);
484 msg->tx_ts = ts;
485 msg->tx_status |= status;
486 msg->tx_arb_lost_cnt += arb_lost_cnt;
487 msg->tx_nack_cnt += nack_cnt;
488 msg->tx_low_drive_cnt += low_drive_cnt;
489 msg->tx_error_cnt += error_cnt;
490
491 /* Mark that we're done with this transmit */
492 adap->transmitting = NULL;
493
494 /*
495 * If there are still retry attempts left and there was an error and
496 * the hardware didn't signal that it retried itself (by setting
497 * CEC_TX_STATUS_MAX_RETRIES), then we will retry ourselves.
498 */
499 if (data->attempts > 1 &&
500 !(status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK))) {
501 /* Retry this message */
502 data->attempts--;
503 /* Add the message in front of the transmit queue */
504 list_add(&data->list, &adap->transmit_queue);
505 adap->transmit_queue_sz++;
506 goto wake_thread;
507 }
508
509 data->attempts = 0;
510
511 /* Always set CEC_TX_STATUS_MAX_RETRIES on error */
512 if (!(status & CEC_TX_STATUS_OK))
513 msg->tx_status |= CEC_TX_STATUS_MAX_RETRIES;
514
515 /* Queue transmitted message for monitoring purposes */
516 cec_queue_msg_monitor(adap, msg, 1);
517
518 if ((status & CEC_TX_STATUS_OK) && adap->is_configured &&
519 msg->timeout) {
520 /*
521 * Queue the message into the wait queue if we want to wait
522 * for a reply.
523 */
524 list_add_tail(&data->list, &adap->wait_queue);
525 schedule_delayed_work(&data->work,
526 msecs_to_jiffies(msg->timeout));
527 } else {
528 /* Otherwise we're done */
529 cec_data_completed(data);
530 }
531
532 wake_thread:
533 /*
534 * Wake up the main thread to see if another message is ready
535 * for transmitting or to retry the current message.
536 */
537 wake_up_interruptible(&adap->kthread_waitq);
538 unlock:
539 mutex_unlock(&adap->lock);
540 }
541 EXPORT_SYMBOL_GPL(cec_transmit_done);
542
543 /*
544 * Called when waiting for a reply times out.
545 */
546 static void cec_wait_timeout(struct work_struct *work)
547 {
548 struct cec_data *data = container_of(work, struct cec_data, work.work);
549 struct cec_adapter *adap = data->adap;
550
551 mutex_lock(&adap->lock);
552 /*
553 * Sanity check in case the timeout and the arrival of the message
554 * happened at the same time.
555 */
556 if (list_empty(&data->list))
557 goto unlock;
558
559 /* Mark the message as timed out */
560 list_del_init(&data->list);
561 data->msg.rx_ts = ktime_get_ns();
562 data->msg.rx_status = CEC_RX_STATUS_TIMEOUT;
563 cec_data_completed(data);
564 unlock:
565 mutex_unlock(&adap->lock);
566 }
567
568 /*
569 * Transmit a message. The fh argument may be NULL if the transmit is not
570 * associated with a specific filehandle.
571 *
572 * This function is called with adap->lock held.
573 */
574 int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
575 struct cec_fh *fh, bool block)
576 {
577 struct cec_data *data;
578 u8 last_initiator = 0xff;
579 unsigned int timeout;
580 int res = 0;
581
582 msg->rx_ts = 0;
583 msg->tx_ts = 0;
584 msg->rx_status = 0;
585 msg->tx_status = 0;
586 msg->tx_arb_lost_cnt = 0;
587 msg->tx_nack_cnt = 0;
588 msg->tx_low_drive_cnt = 0;
589 msg->tx_error_cnt = 0;
590 msg->flags = 0;
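/* The sequence number is never 0: skip 0 when the counter wraps around. */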
591 msg->sequence = ++adap->sequence;
592 if (!msg->sequence)
593 msg->sequence = ++adap->sequence;
594
595 if (msg->reply && msg->timeout == 0) {
596 /* Make sure the timeout isn't 0. */
597 msg->timeout = 1000;
598 }
599
600 /* Sanity checks */
601 if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
602 dprintk(1, "cec_transmit_msg: invalid length %d\n", msg->len);
603 return -EINVAL;
604 }
605 if (msg->timeout && msg->len == 1) {
606 dprintk(1, "cec_transmit_msg: can't reply for poll msg\n");
607 return -EINVAL;
608 }
609 memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
610 if (msg->len == 1) {
611 if (cec_msg_initiator(msg) != 0xf ||
612 cec_msg_destination(msg) == 0xf) {
613 dprintk(1, "cec_transmit_msg: invalid poll message\n");
614 return -EINVAL;
615 }
616 if (cec_has_log_addr(adap, cec_msg_destination(msg))) {
617 /*
618 * If the destination is a logical address our adapter
619 * has already claimed, then just NACK this.
620 * It depends on the hardware what it will do with a
621  * POLL to itself (some acknowledge it), so it is just as
622 * easy to handle it here so the behavior will be
623 * consistent.
624 */
625 msg->tx_ts = ktime_get_ns();
626 msg->tx_status = CEC_TX_STATUS_NACK |
627 CEC_TX_STATUS_MAX_RETRIES;
628 msg->tx_nack_cnt = 1;
629 return 0;
630 }
631 }
632 if (msg->len > 1 && !cec_msg_is_broadcast(msg) &&
633 cec_has_log_addr(adap, cec_msg_destination(msg))) {
634 dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
635 return -EINVAL;
636 }
637 if (cec_msg_initiator(msg) != 0xf &&
638 !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
639 dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
640 cec_msg_initiator(msg));
641 return -EINVAL;
642 }
643 if (!adap->is_configured && !adap->is_configuring)
644 return -ENONET;
645
646 if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ)
647 return -EBUSY;
648
649 data = kzalloc(sizeof(*data), GFP_KERNEL);
650 if (!data)
651 return -ENOMEM;
652
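/*
 * CDC messages carry the initiator's physical address in bytes 2 and 3,
 * so fill those in with our current physical address.
 */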
653 if (msg->len > 1 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) {
654 msg->msg[2] = adap->phys_addr >> 8;
655 msg->msg[3] = adap->phys_addr & 0xff;
656 }
657
658 if (msg->timeout)
659 dprintk(2, "cec_transmit_msg: %*ph (wait for 0x%02x%s)\n",
660 msg->len, msg->msg, msg->reply, !block ? ", nb" : "");
661 else
662 dprintk(2, "cec_transmit_msg: %*ph%s\n",
663 msg->len, msg->msg, !block ? " (nb)" : "");
664
665 data->msg = *msg;
666 data->fh = fh;
667 data->adap = adap;
668 data->blocking = block;
669
670 /*
671 * Determine if this message follows a message from the same
672  * initiator. Needed to determine the signal free time later on.
673 */
674 if (msg->len > 1) {
675 if (!(list_empty(&adap->transmit_queue))) {
676 const struct cec_data *last;
677
678 last = list_last_entry(&adap->transmit_queue,
679 const struct cec_data, list);
680 last_initiator = cec_msg_initiator(&last->msg);
681 } else if (adap->transmitting) {
682 last_initiator =
683 cec_msg_initiator(&adap->transmitting->msg);
684 }
685 }
686 data->new_initiator = last_initiator != cec_msg_initiator(msg);
687 init_completion(&data->c);
688 INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
689
690 if (fh)
691 list_add_tail(&data->xfer_list, &fh->xfer_list);
692 list_add_tail(&data->list, &adap->transmit_queue);
693 adap->transmit_queue_sz++;
694 if (!adap->transmitting)
695 wake_up_interruptible(&adap->kthread_waitq);
696
697 /* All done if we don't need to block waiting for completion */
698 if (!block)
699 return 0;
700
701 /*
702 * If we don't get a completion before this time something is really
703 * wrong and we time out.
704 */
705 timeout = CEC_XFER_TIMEOUT_MS;
706 /* Add the requested timeout if we have to wait for a reply as well */
707 if (msg->timeout)
708 timeout += msg->timeout;
709
710 /*
711 * Release the lock and wait, retake the lock afterwards.
712 */
713 mutex_unlock(&adap->lock);
714 res = wait_for_completion_killable_timeout(&data->c,
715 msecs_to_jiffies(timeout));
716 mutex_lock(&adap->lock);
717
718 if (data->completed) {
719 /* The transmit completed (possibly with an error) */
720 *msg = data->msg;
721 kfree(data);
722 return 0;
723 }
724 /*
725 * The wait for completion timed out or was interrupted, so mark this
726  * as non-blocking and disconnect it from the filehandle since the
727  * transmit is still 'in flight'. When it finally completes it will just drop the
728 * result silently.
729 */
730 data->blocking = false;
731 if (data->fh)
732 list_del(&data->xfer_list);
733 data->fh = NULL;
734
735 if (res == 0) { /* timed out */
736 /* Check if the reply or the transmit failed */
737 if (msg->timeout && (msg->tx_status & CEC_TX_STATUS_OK))
738 msg->rx_status = CEC_RX_STATUS_TIMEOUT;
739 else
740 msg->tx_status = CEC_TX_STATUS_MAX_RETRIES;
741 }
742 return res > 0 ? 0 : res;
743 }
744
745 /* Helper function to be used by drivers and this framework. */
746 int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
747 bool block)
748 {
749 int ret;
750
751 mutex_lock(&adap->lock);
752 ret = cec_transmit_msg_fh(adap, msg, NULL, block);
753 mutex_unlock(&adap->lock);
754 return ret;
755 }
756 EXPORT_SYMBOL_GPL(cec_transmit_msg);
757
758 /*
759 * I don't like forward references but without this the low-level
760 * cec_received_msg() function would come after a bunch of high-level
761 * CEC protocol handling functions. That was very confusing.
762 */
763 static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
764 bool is_reply);
765
766 /* Called by the CEC adapter if a message is received */
767 void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
768 {
769 struct cec_data *data;
770 u8 msg_init = cec_msg_initiator(msg);
771 u8 msg_dest = cec_msg_destination(msg);
772 bool is_reply = false;
773 bool valid_la = true;
774
775 if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE))
776 return;
777
778 msg->rx_ts = ktime_get_ns();
779 msg->rx_status = CEC_RX_STATUS_OK;
780 msg->sequence = msg->reply = msg->timeout = 0;
781 msg->tx_status = 0;
782 msg->tx_ts = 0;
783 msg->flags = 0;
784 memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
785
786 mutex_lock(&adap->lock);
787 dprintk(2, "cec_received_msg: %*ph\n", msg->len, msg->msg);
788
789 /* Check if this message was for us (directed or broadcast). */
790 if (!cec_msg_is_broadcast(msg))
791 valid_la = cec_has_log_addr(adap, msg_dest);
792
793 /* It's a valid message and not a poll or CDC message */
794 if (valid_la && msg->len > 1 && msg->msg[1] != CEC_MSG_CDC_MESSAGE) {
795 u8 cmd = msg->msg[1];
796 bool abort = cmd == CEC_MSG_FEATURE_ABORT;
797
798 /* The aborted command is in msg[2] */
799 if (abort)
800 cmd = msg->msg[2];
801
802 /*
803 * Walk over all transmitted messages that are waiting for a
804 * reply.
805 */
806 list_for_each_entry(data, &adap->wait_queue, list) {
807 struct cec_msg *dst = &data->msg;
808
809 /* Does the command match? */
810 if ((abort && cmd != dst->msg[1]) ||
811 (!abort && cmd != dst->reply))
812 continue;
813
814 /* Does the addressing match? */
815 if (msg_init != cec_msg_destination(dst) &&
816 !cec_msg_is_broadcast(dst))
817 continue;
818
819 /* We got a reply */
820 memcpy(dst->msg, msg->msg, msg->len);
821 dst->len = msg->len;
822 dst->rx_ts = msg->rx_ts;
823 dst->rx_status = msg->rx_status;
824 if (abort)
825 dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
826 /* Remove it from the wait_queue */
827 list_del_init(&data->list);
828
829 /* Cancel the pending timeout work */
830 if (!cancel_delayed_work(&data->work)) {
831 mutex_unlock(&adap->lock);
832 flush_scheduled_work();
833 mutex_lock(&adap->lock);
834 }
835 /*
836 * Mark this as a reply, provided someone is still
837 * waiting for the answer.
838 */
839 if (data->fh)
840 is_reply = true;
841 cec_data_completed(data);
842 break;
843 }
844 }
845 mutex_unlock(&adap->lock);
846
847 /* Pass the message on to any monitoring filehandles */
848 cec_queue_msg_monitor(adap, msg, valid_la);
849
850 /* We're done if it is not for us or if it is a poll message */
851 if (!valid_la || msg->len <= 1)
852 return;
853
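/* Without any claimed logical addresses there is nothing to process. */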
854 if (adap->log_addrs.log_addr_mask == 0)
855 return;
856
857 /*
858 * Process the message on the protocol level. If is_reply is true,
859 * then cec_receive_notify() won't pass on the reply to the listener(s)
860 * since that was already done by cec_data_completed() above.
861 */
862 cec_receive_notify(adap, msg, is_reply);
863 }
864 EXPORT_SYMBOL_GPL(cec_received_msg);
865
866 /* Logical Address Handling */
867
868 /*
869 * Attempt to claim a specific logical address.
870 *
871 * This function is called with adap->lock held.
872 */
873 static int cec_config_log_addr(struct cec_adapter *adap,
874 unsigned int idx,
875 unsigned int log_addr)
876 {
877 struct cec_log_addrs *las = &adap->log_addrs;
878 struct cec_msg msg = { };
879 int err;
880
881 if (cec_has_log_addr(adap, log_addr))
882 return 0;
883
884 /* Send poll message */
885 msg.len = 1;
886 msg.msg[0] = 0xf0 | log_addr;
887 err = cec_transmit_msg_fh(adap, &msg, NULL, true);
888
889 /*
890  * While we were trying to poll, the physical address was reset
891  * and the adapter was unconfigured, so bail out.
892 */
893 if (!adap->is_configuring)
894 return -EINTR;
895
896 if (err)
897 return err;
898
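/*
 * If the poll was acknowledged, then another device already uses this
 * logical address, so it cannot be claimed.
 */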
899 if (msg.tx_status & CEC_TX_STATUS_OK)
900 return 0;
901
902 /*
903 * Message not acknowledged, so this logical
904 * address is free to use.
905 */
906 err = adap->ops->adap_log_addr(adap, log_addr);
907 if (err)
908 return err;
909
910 las->log_addr[idx] = log_addr;
911 las->log_addr_mask |= 1 << log_addr;
912 adap->phys_addrs[log_addr] = adap->phys_addr;
913
914 dprintk(2, "claimed addr %d (%d)\n", log_addr,
915 las->primary_device_type[idx]);
916 return 1;
917 }
918
919 /*
920 * Unconfigure the adapter: clear all logical addresses and send
921 * the state changed event.
922 *
923 * This function is called with adap->lock held.
924 */
925 static void cec_adap_unconfigure(struct cec_adapter *adap)
926 {
927 WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
928 adap->log_addrs.log_addr_mask = 0;
929 adap->is_configuring = false;
930 adap->is_configured = false;
931 memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
932 wake_up_interruptible(&adap->kthread_waitq);
933 cec_post_state_event(adap);
934 }
935
936 /*
937 * Attempt to claim the required logical addresses.
938 */
939 static int cec_config_thread_func(void *arg)
940 {
941 /* The various LAs for each type of device */
942 static const u8 tv_log_addrs[] = {
943 CEC_LOG_ADDR_TV, CEC_LOG_ADDR_SPECIFIC,
944 CEC_LOG_ADDR_INVALID
945 };
946 static const u8 record_log_addrs[] = {
947 CEC_LOG_ADDR_RECORD_1, CEC_LOG_ADDR_RECORD_2,
948 CEC_LOG_ADDR_RECORD_3,
949 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
950 CEC_LOG_ADDR_INVALID
951 };
952 static const u8 tuner_log_addrs[] = {
953 CEC_LOG_ADDR_TUNER_1, CEC_LOG_ADDR_TUNER_2,
954 CEC_LOG_ADDR_TUNER_3, CEC_LOG_ADDR_TUNER_4,
955 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
956 CEC_LOG_ADDR_INVALID
957 };
958 static const u8 playback_log_addrs[] = {
959 CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_PLAYBACK_2,
960 CEC_LOG_ADDR_PLAYBACK_3,
961 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
962 CEC_LOG_ADDR_INVALID
963 };
964 static const u8 audiosystem_log_addrs[] = {
965 CEC_LOG_ADDR_AUDIOSYSTEM,
966 CEC_LOG_ADDR_INVALID
967 };
968 static const u8 specific_use_log_addrs[] = {
969 CEC_LOG_ADDR_SPECIFIC,
970 CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
971 CEC_LOG_ADDR_INVALID
972 };
973 static const u8 *type2addrs[6] = {
974 [CEC_LOG_ADDR_TYPE_TV] = tv_log_addrs,
975 [CEC_LOG_ADDR_TYPE_RECORD] = record_log_addrs,
976 [CEC_LOG_ADDR_TYPE_TUNER] = tuner_log_addrs,
977 [CEC_LOG_ADDR_TYPE_PLAYBACK] = playback_log_addrs,
978 [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = audiosystem_log_addrs,
979 [CEC_LOG_ADDR_TYPE_SPECIFIC] = specific_use_log_addrs,
980 };
981 static const u16 type2mask[] = {
982 [CEC_LOG_ADDR_TYPE_TV] = CEC_LOG_ADDR_MASK_TV,
983 [CEC_LOG_ADDR_TYPE_RECORD] = CEC_LOG_ADDR_MASK_RECORD,
984 [CEC_LOG_ADDR_TYPE_TUNER] = CEC_LOG_ADDR_MASK_TUNER,
985 [CEC_LOG_ADDR_TYPE_PLAYBACK] = CEC_LOG_ADDR_MASK_PLAYBACK,
986 [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = CEC_LOG_ADDR_MASK_AUDIOSYSTEM,
987 [CEC_LOG_ADDR_TYPE_SPECIFIC] = CEC_LOG_ADDR_MASK_SPECIFIC,
988 };
989 struct cec_adapter *adap = arg;
990 struct cec_log_addrs *las = &adap->log_addrs;
991 int err;
992 int i, j;
993
994 mutex_lock(&adap->lock);
995 dprintk(1, "physical address: %x.%x.%x.%x, claim %d logical addresses\n",
996 cec_phys_addr_exp(adap->phys_addr), las->num_log_addrs);
997 las->log_addr_mask = 0;
998
999 if (las->log_addr_type[0] == CEC_LOG_ADDR_TYPE_UNREGISTERED)
1000 goto configured;
1001
1002 for (i = 0; i < las->num_log_addrs; i++) {
1003 unsigned int type = las->log_addr_type[i];
1004 const u8 *la_list;
1005 u8 last_la;
1006
1007 /*
1008 * The TV functionality can only map to physical address 0.
1009 * For any other address, try the Specific functionality
1010 * instead as per the spec.
1011 */
1012 if (adap->phys_addr && type == CEC_LOG_ADDR_TYPE_TV)
1013 type = CEC_LOG_ADDR_TYPE_SPECIFIC;
1014
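/*
 * Try to reclaim the logical address that was used last time; if it is
 * invalid, unregistered or not valid for this type, start with the
 * first address in the list instead.
 */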
1015 la_list = type2addrs[type];
1016 last_la = las->log_addr[i];
1017 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1018 if (last_la == CEC_LOG_ADDR_INVALID ||
1019 last_la == CEC_LOG_ADDR_UNREGISTERED ||
1020 !((1 << last_la) & type2mask[type]))
1021 last_la = la_list[0];
1022
1023 err = cec_config_log_addr(adap, i, last_la);
1024 if (err > 0) /* Reused last LA */
1025 continue;
1026
1027 if (err < 0)
1028 goto unconfigure;
1029
1030 for (j = 0; la_list[j] != CEC_LOG_ADDR_INVALID; j++) {
1031 /* Tried this one already, skip it */
1032 if (la_list[j] == last_la)
1033 continue;
1034 /* The backup addresses are CEC 2.0 specific */
1035 if ((la_list[j] == CEC_LOG_ADDR_BACKUP_1 ||
1036 la_list[j] == CEC_LOG_ADDR_BACKUP_2) &&
1037 las->cec_version < CEC_OP_CEC_VERSION_2_0)
1038 continue;
1039
1040 err = cec_config_log_addr(adap, i, la_list[j]);
1041 if (err == 0) /* LA is in use */
1042 continue;
1043 if (err < 0)
1044 goto unconfigure;
1045 /* Done, claimed an LA */
1046 break;
1047 }
1048
1049 if (la_list[j] == CEC_LOG_ADDR_INVALID)
1050 dprintk(1, "could not claim LA %d\n", i);
1051 }
1052
1053 if (adap->log_addrs.log_addr_mask == 0 &&
1054 !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
1055 goto unconfigure;
1056
1057 configured:
1058 if (adap->log_addrs.log_addr_mask == 0) {
1059 /* Fall back to unregistered */
1060 las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
1061 las->log_addr_mask = 1 << las->log_addr[0];
1062 for (i = 1; i < las->num_log_addrs; i++)
1063 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1064 }
1065 adap->is_configured = true;
1066 adap->is_configuring = false;
1067 cec_post_state_event(adap);
1068 mutex_unlock(&adap->lock);
1069
1070 for (i = 0; i < las->num_log_addrs; i++) {
1071 if (las->log_addr[i] == CEC_LOG_ADDR_INVALID)
1072 continue;
1073
1074 /*
1075 * Report Features must come first according
1076 * to CEC 2.0
1077 */
1078 if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
1079 cec_report_features(adap, i);
1080 cec_report_phys_addr(adap, i);
1081 }
1082 for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
1083 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1084 mutex_lock(&adap->lock);
1085 adap->kthread_config = NULL;
1086 mutex_unlock(&adap->lock);
1087 complete(&adap->config_completion);
1088 return 0;
1089
1090 unconfigure:
1091 for (i = 0; i < las->num_log_addrs; i++)
1092 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1093 cec_adap_unconfigure(adap);
1094 adap->kthread_config = NULL;
1095 mutex_unlock(&adap->lock);
1096 complete(&adap->config_completion);
1097 return 0;
1098 }
1099
1100 /*
1101 * Called from either __cec_s_phys_addr or __cec_s_log_addrs to claim the
1102 * logical addresses.
1103 *
1104 * This function is called with adap->lock held.
1105 */
1106 static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
1107 {
1108 if (WARN_ON(adap->is_configuring || adap->is_configured))
1109 return;
1110
1111 init_completion(&adap->config_completion);
1112
1113 /* Ready to kick off the thread */
1114 adap->is_configuring = true;
1115 adap->kthread_config = kthread_run(cec_config_thread_func, adap,
1116 "ceccfg-%s", adap->name);
1117 if (IS_ERR(adap->kthread_config)) {
1118 adap->kthread_config = NULL;
1119 } else if (block) {
1120 mutex_unlock(&adap->lock);
1121 wait_for_completion(&adap->config_completion);
1122 mutex_lock(&adap->lock);
1123 }
1124 }
1125
1126 /* Set a new physical address and send an event notifying userspace of this.
1127 *
1128 * This function is called with adap->lock held.
1129 */
1130 void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1131 {
1132 if (phys_addr == adap->phys_addr || adap->devnode.unregistered)
1133 return;
1134
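/*
 * If the new physical address is invalid, or if we already had a valid
 * physical address, then unconfigure and disable the adapter first.
 */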
1135 if (phys_addr == CEC_PHYS_ADDR_INVALID ||
1136 adap->phys_addr != CEC_PHYS_ADDR_INVALID) {
1137 adap->phys_addr = CEC_PHYS_ADDR_INVALID;
1138 cec_post_state_event(adap);
1139 cec_adap_unconfigure(adap);
1140 /* Disabling monitor all mode should always succeed */
1141 if (adap->monitor_all_cnt)
1142 WARN_ON(call_op(adap, adap_monitor_all_enable, false));
1143 WARN_ON(adap->ops->adap_enable(adap, false));
1144 if (phys_addr == CEC_PHYS_ADDR_INVALID)
1145 return;
1146 }
1147
1148 if (adap->ops->adap_enable(adap, true))
1149 return;
1150
1151 if (adap->monitor_all_cnt &&
1152 call_op(adap, adap_monitor_all_enable, true)) {
1153 WARN_ON(adap->ops->adap_enable(adap, false));
1154 return;
1155 }
1156 adap->phys_addr = phys_addr;
1157 cec_post_state_event(adap);
1158 if (adap->log_addrs.num_log_addrs)
1159 cec_claim_log_addrs(adap, block);
1160 }
1161
1162 void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
1163 {
1164 if (IS_ERR_OR_NULL(adap))
1165 return;
1166
1167 if (WARN_ON(adap->capabilities & CEC_CAP_PHYS_ADDR))
1168 return;
1169 mutex_lock(&adap->lock);
1170 __cec_s_phys_addr(adap, phys_addr, block);
1171 mutex_unlock(&adap->lock);
1172 }
1173 EXPORT_SYMBOL_GPL(cec_s_phys_addr);
1174
1175 /*
1176 * Called from either the ioctl or a driver to set the logical addresses.
1177 *
1178 * This function is called with adap->lock held.
1179 */
1180 int __cec_s_log_addrs(struct cec_adapter *adap,
1181 struct cec_log_addrs *log_addrs, bool block)
1182 {
1183 u16 type_mask = 0;
1184 int i, j;
1185
1186 if (adap->devnode.unregistered)
1187 return -ENODEV;
1188
1189 if (!log_addrs || log_addrs->num_log_addrs == 0) {
1190 adap->log_addrs.num_log_addrs = 0;
1191 cec_adap_unconfigure(adap);
1192 return 0;
1193 }
1194
1195 /* Ensure the osd name is 0-terminated */
1196 log_addrs->osd_name[sizeof(log_addrs->osd_name) - 1] = '\0';
1197
1198 /* Sanity checks */
1199 if (log_addrs->num_log_addrs > adap->available_log_addrs) {
1200 dprintk(1, "num_log_addrs > %d\n", adap->available_log_addrs);
1201 return -EINVAL;
1202 }
1203
1204 /*
1205 * Vendor ID is a 24 bit number, so check if the value is
1206 * within the correct range.
1207 */
1208 if (log_addrs->vendor_id != CEC_VENDOR_ID_NONE &&
1209 (log_addrs->vendor_id & 0xff000000) != 0)
1210 return -EINVAL;
1211
1212 if (log_addrs->cec_version != CEC_OP_CEC_VERSION_1_4 &&
1213 log_addrs->cec_version != CEC_OP_CEC_VERSION_2_0)
1214 return -EINVAL;
1215
1216 if (log_addrs->num_log_addrs > 1)
1217 for (i = 0; i < log_addrs->num_log_addrs; i++)
1218 if (log_addrs->log_addr_type[i] ==
1219 CEC_LOG_ADDR_TYPE_UNREGISTERED) {
1220 dprintk(1, "num_log_addrs > 1 can't be combined with unregistered LA\n");
1221 return -EINVAL;
1222 }
1223
1224 for (i = 0; i < log_addrs->num_log_addrs; i++) {
1225 const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]);
1226 u8 *features = log_addrs->features[i];
1227 bool op_is_dev_features = false;
1228
1229 log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
1230 if (type_mask & (1 << log_addrs->log_addr_type[i])) {
1231 dprintk(1, "duplicate logical address type\n");
1232 return -EINVAL;
1233 }
1234 type_mask |= 1 << log_addrs->log_addr_type[i];
1235 if ((type_mask & (1 << CEC_LOG_ADDR_TYPE_RECORD)) &&
1236 (type_mask & (1 << CEC_LOG_ADDR_TYPE_PLAYBACK))) {
1237 /* Record already contains the playback functionality */
1238 dprintk(1, "invalid record + playback combination\n");
1239 return -EINVAL;
1240 }
1241 if (log_addrs->primary_device_type[i] >
1242 CEC_OP_PRIM_DEVTYPE_PROCESSOR) {
1243 dprintk(1, "unknown primary device type\n");
1244 return -EINVAL;
1245 }
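/* Primary device type 2 is a reserved value in the CEC specification. */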
1246 if (log_addrs->primary_device_type[i] == 2) {
1247 dprintk(1, "invalid primary device type\n");
1248 return -EINVAL;
1249 }
1250 if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
1251 dprintk(1, "unknown logical address type\n");
1252 return -EINVAL;
1253 }
1254 for (j = 0; j < feature_sz; j++) {
1255 if ((features[j] & 0x80) == 0) {
1256 if (op_is_dev_features)
1257 break;
1258 op_is_dev_features = true;
1259 }
1260 }
1261 if (!op_is_dev_features || j == feature_sz) {
1262 dprintk(1, "malformed features\n");
1263 return -EINVAL;
1264 }
1265 /* Zero unused part of the feature array */
1266 memset(features + j + 1, 0, feature_sz - j - 1);
1267 }
1268
1269 if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) {
1270 if (log_addrs->num_log_addrs > 2) {
1271 dprintk(1, "CEC 2.0 allows no more than 2 logical addresses\n");
1272 return -EINVAL;
1273 }
1274 if (log_addrs->num_log_addrs == 2) {
1275 if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_AUDIOSYSTEM) |
1276 (1 << CEC_LOG_ADDR_TYPE_TV)))) {
1277 dprintk(1, "Two LAs is only allowed for audiosystem and TV\n");
1278 return -EINVAL;
1279 }
1280 if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_PLAYBACK) |
1281 (1 << CEC_LOG_ADDR_TYPE_RECORD)))) {
1282 dprintk(1, "An audiosystem/TV can only be combined with record or playback\n");
1283 return -EINVAL;
1284 }
1285 }
1286 }
1287
1288 /* Zero unused LAs */
1289 for (i = log_addrs->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) {
1290 log_addrs->primary_device_type[i] = 0;
1291 log_addrs->log_addr_type[i] = 0;
1292 log_addrs->all_device_types[i] = 0;
1293 memset(log_addrs->features[i], 0,
1294 sizeof(log_addrs->features[i]));
1295 }
1296
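/*
 * Preserve the currently claimed address mask; it is updated once the
 * logical addresses are (re)claimed.
 */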
1297 log_addrs->log_addr_mask = adap->log_addrs.log_addr_mask;
1298 adap->log_addrs = *log_addrs;
1299 if (adap->phys_addr != CEC_PHYS_ADDR_INVALID)
1300 cec_claim_log_addrs(adap, block);
1301 return 0;
1302 }
1303
1304 int cec_s_log_addrs(struct cec_adapter *adap,
1305 struct cec_log_addrs *log_addrs, bool block)
1306 {
1307 int err;
1308
1309 if (WARN_ON(adap->capabilities & CEC_CAP_LOG_ADDRS))
1310 return -EINVAL;
1311 mutex_lock(&adap->lock);
1312 err = __cec_s_log_addrs(adap, log_addrs, block);
1313 mutex_unlock(&adap->lock);
1314 return err;
1315 }
1316 EXPORT_SYMBOL_GPL(cec_s_log_addrs);
1317
1318 /* High-level core CEC message handling */
1319
1320 /* Transmit the Report Features message */
1321 static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
1322 {
1323 struct cec_msg msg = { };
1324 const struct cec_log_addrs *las = &adap->log_addrs;
1325 const u8 *features = las->features[la_idx];
1326 bool op_is_dev_features = false;
1327 unsigned int idx;
1328
1329 /* This is 2.0 and up only */
1330 if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
1331 return 0;
1332
1333 /* Report Features */
1334 msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
1335 msg.len = 4;
1336 msg.msg[1] = CEC_MSG_REPORT_FEATURES;
1337 msg.msg[2] = adap->log_addrs.cec_version;
1338 msg.msg[3] = las->all_device_types[la_idx];
1339
1340 /* Write RC Profiles first, then Device Features */
1341 for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
1342 msg.msg[msg.len++] = features[idx];
1343 if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
1344 if (op_is_dev_features)
1345 break;
1346 op_is_dev_features = true;
1347 }
1348 }
1349 return cec_transmit_msg(adap, &msg, false);
1350 }
1351
1352 /* Transmit the Report Physical Address message */
1353 static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
1354 {
1355 const struct cec_log_addrs *las = &adap->log_addrs;
1356 struct cec_msg msg = { };
1357
1358 /* Report Physical Address */
1359 msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
1360 cec_msg_report_physical_addr(&msg, adap->phys_addr,
1361 las->primary_device_type[la_idx]);
1362 dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
1363 las->log_addr[la_idx],
1364 cec_phys_addr_exp(adap->phys_addr));
1365 return cec_transmit_msg(adap, &msg, false);
1366 }
1367
1368 /* Transmit the Feature Abort message */
1369 static int cec_feature_abort_reason(struct cec_adapter *adap,
1370 struct cec_msg *msg, u8 reason)
1371 {
1372 struct cec_msg tx_msg = { };
1373
1374 /*
1375 * Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT
1376 * message!
1377 */
1378 if (msg->msg[1] == CEC_MSG_FEATURE_ABORT)
1379 return 0;
1380 cec_msg_set_reply_to(&tx_msg, msg);
1381 cec_msg_feature_abort(&tx_msg, msg->msg[1], reason);
1382 return cec_transmit_msg(adap, &tx_msg, false);
1383 }
1384
1385 static int cec_feature_abort(struct cec_adapter *adap, struct cec_msg *msg)
1386 {
1387 return cec_feature_abort_reason(adap, msg,
1388 CEC_OP_ABORT_UNRECOGNIZED_OP);
1389 }
1390
1391 static int cec_feature_refused(struct cec_adapter *adap, struct cec_msg *msg)
1392 {
1393 return cec_feature_abort_reason(adap, msg,
1394 CEC_OP_ABORT_REFUSED);
1395 }
1396
1397 /*
1398 * Called when a CEC message is received. This function will do any
1399 * necessary core processing. The is_reply bool is true if this message
1400 * is a reply to an earlier transmit.
1401 *
1402 * The message is either a broadcast message or a valid directed message.
1403 */
1404 static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1405 bool is_reply)
1406 {
1407 bool is_broadcast = cec_msg_is_broadcast(msg);
1408 u8 dest_laddr = cec_msg_destination(msg);
1409 u8 init_laddr = cec_msg_initiator(msg);
1410 u8 devtype = cec_log_addr2dev(adap, dest_laddr);
1411 int la_idx = cec_log_addr2idx(adap, dest_laddr);
1412 bool from_unregistered = init_laddr == 0xf;
1413 struct cec_msg tx_cec_msg = { };
1414
1415 dprintk(1, "cec_receive_notify: %*ph\n", msg->len, msg->msg);
1416
1417 if (adap->ops->received) {
1418 /* Allow drivers to process the message first */
1419 if (adap->ops->received(adap, msg) != -ENOMSG)
1420 return 0;
1421 }
1422
1423 /*
1424  * CEC_MSG_REPORT_PHYSICAL_ADDR, CEC_MSG_USER_CONTROL_PRESSED and
1425 * CEC_MSG_USER_CONTROL_RELEASED messages always have to be
1426 * handled by the CEC core, even if the passthrough mode is on.
1427 * The others are just ignored if passthrough mode is on.
1428 */
1429 switch (msg->msg[1]) {
1430 case CEC_MSG_GET_CEC_VERSION:
1431 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1432 case CEC_MSG_ABORT:
1433 case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
1434 case CEC_MSG_GIVE_PHYSICAL_ADDR:
1435 case CEC_MSG_GIVE_OSD_NAME:
1436 case CEC_MSG_GIVE_FEATURES:
1437 /*
1438 * Skip processing these messages if the passthrough mode
1439 * is on.
1440 */
1441 if (adap->passthrough)
1442 goto skip_processing;
1443 /* Ignore if addressing is wrong */
1444 if (is_broadcast || from_unregistered)
1445 return 0;
1446 break;
1447
1448 case CEC_MSG_USER_CONTROL_PRESSED:
1449 case CEC_MSG_USER_CONTROL_RELEASED:
1450 /* Wrong addressing mode: don't process */
1451 if (is_broadcast || from_unregistered)
1452 goto skip_processing;
1453 break;
1454
1455 case CEC_MSG_REPORT_PHYSICAL_ADDR:
1456 /*
1457 * This message is always processed, regardless of the
1458 * passthrough setting.
1459 *
1460 * Exception: don't process if wrong addressing mode.
1461 */
1462 if (!is_broadcast)
1463 goto skip_processing;
1464 break;
1465
1466 default:
1467 break;
1468 }
1469
1470 cec_msg_set_reply_to(&tx_cec_msg, msg);
1471
1472 switch (msg->msg[1]) {
1473 /* The following messages are processed but still passed through */
1474 case CEC_MSG_REPORT_PHYSICAL_ADDR: {
1475 u16 pa = (msg->msg[2] << 8) | msg->msg[3];
1476
1477 if (!from_unregistered)
1478 adap->phys_addrs[init_laddr] = pa;
1479 dprintk(1, "Reported physical address %x.%x.%x.%x for logical address %d\n",
1480 cec_phys_addr_exp(pa), init_laddr);
1481 break;
1482 }
1483
1484 case CEC_MSG_USER_CONTROL_PRESSED:
1485 if (!(adap->capabilities & CEC_CAP_RC))
1486 break;
1487
1488 #if IS_REACHABLE(CONFIG_RC_CORE)
1489 switch (msg->msg[2]) {
1490 /*
1491  * Play function: this message can have a variable length
1492 * depending on the specific play function that is used.
1493 */
1494 case 0x60:
1495 if (msg->len == 2)
1496 rc_keydown(adap->rc, RC_TYPE_CEC,
1497 msg->msg[2], 0);
1498 else
1499 rc_keydown(adap->rc, RC_TYPE_CEC,
1500 msg->msg[2] << 8 | msg->msg[3], 0);
1501 break;
1502 /*
1503 * Other function messages that are not handled.
1504  * Currently the RC framework does not allow supplying an
1505  * additional parameter with a keypress. These "keys" contain
1506  * other information such as a channel number, an input number,
1507  * etc.
1508  * For the time being these messages are not processed by the
1509  * framework and are simply forwarded to userspace.
1510 */
1511 case 0x56: case 0x57:
1512 case 0x67: case 0x68: case 0x69: case 0x6a:
1513 break;
1514 default:
1515 rc_keydown(adap->rc, RC_TYPE_CEC, msg->msg[2], 0);
1516 break;
1517 }
1518 #endif
1519 break;
1520
1521 case CEC_MSG_USER_CONTROL_RELEASED:
1522 if (!(adap->capabilities & CEC_CAP_RC))
1523 break;
1524 #if IS_REACHABLE(CONFIG_RC_CORE)
1525 rc_keyup(adap->rc);
1526 #endif
1527 break;
1528
1529 /*
1530 * The remaining messages are only processed if the passthrough mode
1531 * is off.
1532 */
1533 case CEC_MSG_GET_CEC_VERSION:
1534 cec_msg_cec_version(&tx_cec_msg, adap->log_addrs.cec_version);
1535 return cec_transmit_msg(adap, &tx_cec_msg, false);
1536
1537 case CEC_MSG_GIVE_PHYSICAL_ADDR:
1538 /* Do nothing for CEC switches using addr 15 */
1539 if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH && dest_laddr == 15)
1540 return 0;
1541 cec_msg_report_physical_addr(&tx_cec_msg, adap->phys_addr, devtype);
1542 return cec_transmit_msg(adap, &tx_cec_msg, false);
1543
1544 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1545 if (adap->log_addrs.vendor_id == CEC_VENDOR_ID_NONE)
1546 return cec_feature_abort(adap, msg);
1547 cec_msg_device_vendor_id(&tx_cec_msg, adap->log_addrs.vendor_id);
1548 return cec_transmit_msg(adap, &tx_cec_msg, false);
1549
1550 case CEC_MSG_ABORT:
1551 /* Do nothing for CEC switches */
1552 if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH)
1553 return 0;
1554 return cec_feature_refused(adap, msg);
1555
1556 case CEC_MSG_GIVE_OSD_NAME: {
1557 if (adap->log_addrs.osd_name[0] == 0)
1558 return cec_feature_abort(adap, msg);
1559 cec_msg_set_osd_name(&tx_cec_msg, adap->log_addrs.osd_name);
1560 return cec_transmit_msg(adap, &tx_cec_msg, false);
1561 }
1562
1563 case CEC_MSG_GIVE_FEATURES:
1564 if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
1565 return cec_report_features(adap, la_idx);
1566 return 0;
1567
1568 default:
1569 /*
1570 * Unprocessed messages are aborted if userspace isn't doing
1571 * any processing either.
1572 */
1573 if (!is_broadcast && !is_reply && !adap->follower_cnt &&
1574 !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
1575 return cec_feature_abort(adap, msg);
1576 break;
1577 }
1578
1579 skip_processing:
1580 /* If this was a reply, then we're done */
1581 if (is_reply)
1582 return 0;
1583
1584 /*
1585 * Send to the exclusive follower if there is one, otherwise send
1586 * to all followers.
1587 */
1588 if (adap->cec_follower)
1589 cec_queue_msg_fh(adap->cec_follower, msg);
1590 else
1591 cec_queue_msg_followers(adap, msg);
1592 return 0;
1593 }
1594
1595 /*
1596 * Helper functions to keep track of the 'monitor all' use count.
1597 *
1598 * These functions are called with adap->lock held.
1599 */
1600 int cec_monitor_all_cnt_inc(struct cec_adapter *adap)
1601 {
1602 int ret = 0;
1603
1604 if (adap->monitor_all_cnt == 0)
1605 ret = call_op(adap, adap_monitor_all_enable, 1);
1606 if (ret == 0)
1607 adap->monitor_all_cnt++;
1608 return ret;
1609 }
1610
1611 void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
1612 {
1613 adap->monitor_all_cnt--;
1614 if (adap->monitor_all_cnt == 0)
1615 WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
1616 }
1617
1618 #ifdef CONFIG_MEDIA_CEC_DEBUG
1619 /*
1620 * Log the current state of the CEC adapter.
1621 * Very useful for debugging.
1622 */
1623 int cec_adap_status(struct seq_file *file, void *priv)
1624 {
1625 struct cec_adapter *adap = dev_get_drvdata(file->private);
1626 struct cec_data *data;
1627
1628 mutex_lock(&adap->lock);
1629 seq_printf(file, "configured: %d\n", adap->is_configured);
1630 seq_printf(file, "configuring: %d\n", adap->is_configuring);
1631 seq_printf(file, "phys_addr: %x.%x.%x.%x\n",
1632 cec_phys_addr_exp(adap->phys_addr));
1633 seq_printf(file, "number of LAs: %d\n", adap->log_addrs.num_log_addrs);
1634 seq_printf(file, "LA mask: 0x%04x\n", adap->log_addrs.log_addr_mask);
1635 if (adap->cec_follower)
1636 seq_printf(file, "has CEC follower%s\n",
1637 adap->passthrough ? " (in passthrough mode)" : "");
1638 if (adap->cec_initiator)
1639 seq_puts(file, "has CEC initiator\n");
1640 if (adap->monitor_all_cnt)
1641 seq_printf(file, "file handles in Monitor All mode: %u\n",
1642 adap->monitor_all_cnt);
1643 data = adap->transmitting;
1644 if (data)
1645 seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n",
1646 data->msg.len, data->msg.msg, data->msg.reply,
1647 data->msg.timeout);
1648 seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz);
1649 list_for_each_entry(data, &adap->transmit_queue, list) {
1650 seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n",
1651 data->msg.len, data->msg.msg, data->msg.reply,
1652 data->msg.timeout);
1653 }
1654 list_for_each_entry(data, &adap->wait_queue, list) {
1655 seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n",
1656 data->msg.len, data->msg.msg, data->msg.reply,
1657 data->msg.timeout);
1658 }
1659
1660 call_void_op(adap, adap_status, file);
1661 mutex_unlock(&adap->lock);
1662 return 0;
1663 }
1664 #endif