]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/media/cec/cec-api.c
media: cec: integrate cec_validate_phys_addr() in cec-api.c
[mirror_ubuntu-bionic-kernel.git] / drivers / media / cec / cec-api.c
1 /*
2 * cec-api.c - HDMI Consumer Electronics Control framework - API
3 *
4 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
5 *
6 * This program is free software; you may redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
12 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
13 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
14 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
15 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
17 * SOFTWARE.
18 */
19
20 #include <linux/errno.h>
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/kmod.h>
25 #include <linux/ktime.h>
26 #include <linux/slab.h>
27 #include <linux/mm.h>
28 #include <linux/string.h>
29 #include <linux/types.h>
30 #include <linux/uaccess.h>
31 #include <linux/version.h>
32
33 #include <media/cec-pin.h>
34 #include "cec-priv.h"
35 #include "cec-pin-priv.h"
36
37 static inline struct cec_devnode *cec_devnode_data(struct file *filp)
38 {
39 struct cec_fh *fh = filp->private_data;
40
41 return &fh->adap->devnode;
42 }
43
44 /* CEC file operations */
45
46 static unsigned int cec_poll(struct file *filp,
47 struct poll_table_struct *poll)
48 {
49 struct cec_devnode *devnode = cec_devnode_data(filp);
50 struct cec_fh *fh = filp->private_data;
51 struct cec_adapter *adap = fh->adap;
52 unsigned int res = 0;
53
54 if (!devnode->registered)
55 return POLLERR | POLLHUP;
56 mutex_lock(&adap->lock);
57 if (adap->is_configured &&
58 adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
59 res |= POLLOUT | POLLWRNORM;
60 if (fh->queued_msgs)
61 res |= POLLIN | POLLRDNORM;
62 if (fh->total_queued_events)
63 res |= POLLPRI;
64 poll_wait(filp, &fh->wait, poll);
65 mutex_unlock(&adap->lock);
66 return res;
67 }
68
69 static bool cec_is_busy(const struct cec_adapter *adap,
70 const struct cec_fh *fh)
71 {
72 bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
73 bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
74
75 /*
76 * Exclusive initiators and followers can always access the CEC adapter
77 */
78 if (valid_initiator || valid_follower)
79 return false;
80 /*
81 * All others can only access the CEC adapter if there is no
82 * exclusive initiator and they are in INITIATOR mode.
83 */
84 return adap->cec_initiator ||
85 fh->mode_initiator == CEC_MODE_NO_INITIATOR;
86 }
87
88 static long cec_adap_g_caps(struct cec_adapter *adap,
89 struct cec_caps __user *parg)
90 {
91 struct cec_caps caps = {};
92
93 strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
94 sizeof(caps.driver));
95 strlcpy(caps.name, adap->name, sizeof(caps.name));
96 caps.available_log_addrs = adap->available_log_addrs;
97 caps.capabilities = adap->capabilities;
98 caps.version = LINUX_VERSION_CODE;
99 if (copy_to_user(parg, &caps, sizeof(caps)))
100 return -EFAULT;
101 return 0;
102 }
103
104 static long cec_adap_g_phys_addr(struct cec_adapter *adap,
105 __u16 __user *parg)
106 {
107 u16 phys_addr;
108
109 mutex_lock(&adap->lock);
110 phys_addr = adap->phys_addr;
111 mutex_unlock(&adap->lock);
112 if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
113 return -EFAULT;
114 return 0;
115 }
116
117 static int cec_validate_phys_addr(u16 phys_addr)
118 {
119 int i;
120
121 if (phys_addr == CEC_PHYS_ADDR_INVALID)
122 return 0;
123 for (i = 0; i < 16; i += 4)
124 if (phys_addr & (0xf << i))
125 break;
126 if (i == 16)
127 return 0;
128 for (i += 4; i < 16; i += 4)
129 if ((phys_addr & (0xf << i)) == 0)
130 return -EINVAL;
131 return 0;
132 }
133
134 static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
135 bool block, __u16 __user *parg)
136 {
137 u16 phys_addr;
138 long err;
139
140 if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
141 return -ENOTTY;
142 if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
143 return -EFAULT;
144
145 err = cec_validate_phys_addr(phys_addr);
146 if (err)
147 return err;
148 mutex_lock(&adap->lock);
149 if (cec_is_busy(adap, fh))
150 err = -EBUSY;
151 else
152 __cec_s_phys_addr(adap, phys_addr, block);
153 mutex_unlock(&adap->lock);
154 return err;
155 }
156
157 static long cec_adap_g_log_addrs(struct cec_adapter *adap,
158 struct cec_log_addrs __user *parg)
159 {
160 struct cec_log_addrs log_addrs;
161
162 mutex_lock(&adap->lock);
163 log_addrs = adap->log_addrs;
164 if (!adap->is_configured)
165 memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
166 sizeof(log_addrs.log_addr));
167 mutex_unlock(&adap->lock);
168
169 if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
170 return -EFAULT;
171 return 0;
172 }
173
/*
 * Implement CEC_ADAP_S_LOG_ADDRS: claim (or clear) logical addresses.
 *
 * Requires the CEC_CAP_LOG_ADDRS capability. Returns -EBUSY while a
 * configuration is in progress, when the adapter is already configured
 * and a non-empty claim is made, or when another fh holds exclusive
 * access. On success the resulting log_addrs are copied back to
 * userspace.
 */
static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;
	long err = -EBUSY;

	if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
		return -ENOTTY;
	if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
		return -EFAULT;
	/* Only these flags may be set by userspace; strip anything else. */
	log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
			   CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
			   CEC_LOG_ADDRS_FL_CDC_ONLY;
	mutex_lock(&adap->lock);
	/*
	 * Clearing (num_log_addrs == 0) is allowed even when configured;
	 * a new claim is only allowed when not configured/configuring.
	 */
	if (!adap->is_configuring &&
	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
	    !cec_is_busy(adap, fh)) {
		err = __cec_s_log_addrs(adap, &log_addrs, block);
		if (!err)
			log_addrs = adap->log_addrs;	/* report the result */
	}
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}
202
/*
 * Implement CEC_TRANSMIT: queue a CEC message for transmission,
 * optionally (block) waiting until it has gone out. On success the
 * updated message (sequence/status fields) is copied back to userspace.
 */
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
			 bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err = 0;

	if (!(adap->capabilities & CEC_CAP_TRANSMIT))
		return -ENOTTY;
	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;

	/* A CDC-Only device can only send CDC messages */
	if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
	    (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
		return -EINVAL;

	/* Error checks below are ordered: first match wins. */
	mutex_lock(&adap->lock);
	if (adap->log_addrs.num_log_addrs == 0)
		err = -EPERM;	/* no logical addresses claimed yet */
	else if (adap->is_configuring)
		err = -ENONET;	/* still claiming logical addresses */
	else if (!adap->is_configured &&
		 (adap->needs_hpd || msg.msg[0] != 0xf0))
		/*
		 * Unconfigured: only msg[0] == 0xf0 may be sent, and only
		 * if no HPD signal is required. NOTE(review): 0xf0 is
		 * presumably initiator 0xf (Unregistered), destination 0
		 * (TV) — confirm against the CEC spec.
		 */
		err = -ENONET;
	else if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		err = cec_transmit_msg_fh(adap, &msg, fh, block);
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}
238
/*
 * Called by CEC_RECEIVE: wait for a message to arrive.
 *
 * Dequeues the oldest queued message into *msg. In non-blocking mode
 * returns -EAGAIN when nothing is queued; in blocking mode waits up to
 * msg->timeout ms (0 = wait forever). Returns 0 on success, -ETIMEDOUT
 * on timeout, or a negative error when the wait is interrupted.
 */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
	/* *msg is overwritten below; remember the caller's timeout. */
	u32 timeout = msg->timeout;
	int res;

	do {
		mutex_lock(&fh->lock);
		/* Are there received messages queued up? */
		if (fh->queued_msgs) {
			/* Yes, return the first one */
			struct cec_msg_entry *entry =
				list_first_entry(&fh->msgs,
						 struct cec_msg_entry, list);

			list_del(&entry->list);
			*msg = entry->msg;
			kfree(entry);
			fh->queued_msgs--;
			mutex_unlock(&fh->lock);
			/* restore original timeout value */
			msg->timeout = timeout;
			return 0;
		}

		/* No, return EAGAIN in non-blocking mode or wait */
		mutex_unlock(&fh->lock);

		/* Return when in non-blocking mode */
		if (!block)
			return -EAGAIN;

		if (msg->timeout) {
			/* The user specified a timeout */
			res = wait_event_interruptible_timeout(fh->wait,
							       fh->queued_msgs,
				msecs_to_jiffies(msg->timeout));
			/* 0 = timed out, >0 = woken with time to spare */
			if (res == 0)
				res = -ETIMEDOUT;
			else if (res > 0)
				res = 0;
		} else {
			/* Wait indefinitely */
			res = wait_event_interruptible(fh->wait,
						       fh->queued_msgs);
		}
		/* Exit on error, otherwise loop to get the new message */
	} while (!res);
	return res;
}
289
290 static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
291 bool block, struct cec_msg __user *parg)
292 {
293 struct cec_msg msg = {};
294 long err;
295
296 if (copy_from_user(&msg, parg, sizeof(msg)))
297 return -EFAULT;
298
299 err = cec_receive_msg(fh, &msg, block);
300 if (err)
301 return err;
302 msg.flags = 0;
303 if (copy_to_user(parg, &msg, sizeof(msg)))
304 return -EFAULT;
305 return 0;
306 }
307
/*
 * Implement CEC_DQEVENT: dequeue the oldest pending event for this
 * filehandle, optionally blocking until one arrives.
 *
 * Returns 0 on success, -EAGAIN when non-blocking and nothing is
 * queued, -EFAULT on a bad user pointer, or the error from an
 * interrupted wait.
 */
static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_event __user *parg)
{
	struct cec_event_entry *ev = NULL;
	u64 ts = ~0ULL;
	unsigned int i;
	unsigned int ev_idx;
	long err = 0;

	mutex_lock(&fh->lock);
	/* Blocking mode: drop the lock while sleeping for an event. */
	while (!fh->total_queued_events && block) {
		mutex_unlock(&fh->lock);
		err = wait_event_interruptible(fh->wait,
					       fh->total_queued_events);
		if (err)
			return err;
		mutex_lock(&fh->lock);
	}

	/* Find the oldest event */
	for (i = 0; i < CEC_NUM_EVENTS; i++) {
		struct cec_event_entry *entry =
			list_first_entry_or_null(&fh->events[i],
						 struct cec_event_entry, list);

		/* <= keeps scanning so equal timestamps pick the last list */
		if (entry && entry->ev.ts <= ts) {
			ev = entry;
			ev_idx = i;
			ts = ev->ev.ts;
		}
	}

	if (!ev) {
		err = -EAGAIN;
		goto unlock;
	}
	list_del(&ev->list);

	if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
		err = -EFAULT;
	/*
	 * Only non-core entries are freed here; core event entries are
	 * presumably owned by the core elsewhere — note the same split in
	 * cec_release().
	 */
	if (ev_idx >= CEC_NUM_CORE_EVENTS)
		kfree(ev);
	fh->queued_events[ev_idx]--;
	fh->total_queued_events--;

unlock:
	mutex_unlock(&fh->lock);
	return err;
}
357
358 static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
359 u32 __user *parg)
360 {
361 u32 mode = fh->mode_initiator | fh->mode_follower;
362
363 if (copy_to_user(parg, &mode, sizeof(mode)))
364 return -EFAULT;
365 return 0;
366 }
367
/*
 * Implement CEC_S_MODE: switch this filehandle's initiator and follower
 * modes.
 *
 * Validates the requested mode against the adapter's capabilities,
 * enforces CAP_NET_ADMIN for monitor modes, rejects with -EBUSY if the
 * requested exclusive role is already taken by another fh, and finally
 * updates the adapter role pointers and per-mode counters.
 */
static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode;
	u8 mode_initiator;
	u8 mode_follower;
	long err = 0;

	if (copy_from_user(&mode, parg, sizeof(mode)))
		return -EFAULT;
	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
		dprintk(1, "%s: invalid mode bits set\n", __func__);
		return -EINVAL;
	}

	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;

	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
	    mode_follower > CEC_MODE_MONITOR_ALL) {
		dprintk(1, "%s: unknown mode\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_ALL &&
	    !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
		dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_PIN &&
	    !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
		dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
		return -EINVAL;
	}

	/* Follower modes should always be able to send CEC messages */
	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
	    mode_follower >= CEC_MODE_FOLLOWER &&
	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		dprintk(1, "%s: cannot transmit\n", __func__);
		return -EINVAL;
	}

	/* Monitor modes require CEC_MODE_NO_INITIATOR */
	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
		dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
			__func__);
		return -EINVAL;
	}

	/* Monitor modes require CAP_NET_ADMIN */
	if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&adap->lock);
	/*
	 * You can't become exclusive follower if someone else already
	 * has that job.
	 */
	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
	    adap->cec_follower && adap->cec_follower != fh)
		err = -EBUSY;
	/*
	 * You can't become exclusive initiator if someone else already
	 * has that job.
	 */
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
	    adap->cec_initiator && adap->cec_initiator != fh)
		err = -EBUSY;

	if (!err) {
		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;

		/* Keep the adapter-wide monitor-all count in sync. */
		if (old_mon_all != new_mon_all) {
			if (new_mon_all)
				err = cec_monitor_all_cnt_inc(adap);
			else
				cec_monitor_all_cnt_dec(adap);
		}
	}

	if (err) {
		mutex_unlock(&adap->lock);
		return err;
	}

	/* Update the follower/pin-monitor counters for old vs new mode. */
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
		adap->monitor_pin_cnt--;
	if (mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt++;
	if (mode_follower == CEC_MODE_MONITOR_PIN) {
		struct cec_event ev = {
			.flags = CEC_EVENT_FL_INITIAL_STATE,
		};

		/* A new pin monitor gets the current pin state as event. */
		ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
						   CEC_EVENT_PIN_CEC_LOW;
		cec_queue_event_fh(fh, &ev, 0);
		adap->monitor_pin_cnt++;
	}
	/* Claim or release the exclusive follower/initiator roles. */
	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		adap->passthrough =
			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
		adap->cec_follower = fh;
	} else if (adap->cec_follower == fh) {
		adap->passthrough = false;
		adap->cec_follower = NULL;
	}
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
		adap->cec_initiator = fh;
	else if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	fh->mode_initiator = mode_initiator;
	fh->mode_follower = mode_follower;
	mutex_unlock(&adap->lock);
	return 0;
}
492
493 static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
494 {
495 struct cec_devnode *devnode = cec_devnode_data(filp);
496 struct cec_fh *fh = filp->private_data;
497 struct cec_adapter *adap = fh->adap;
498 bool block = !(filp->f_flags & O_NONBLOCK);
499 void __user *parg = (void __user *)arg;
500
501 if (!devnode->registered)
502 return -ENODEV;
503
504 switch (cmd) {
505 case CEC_ADAP_G_CAPS:
506 return cec_adap_g_caps(adap, parg);
507
508 case CEC_ADAP_G_PHYS_ADDR:
509 return cec_adap_g_phys_addr(adap, parg);
510
511 case CEC_ADAP_S_PHYS_ADDR:
512 return cec_adap_s_phys_addr(adap, fh, block, parg);
513
514 case CEC_ADAP_G_LOG_ADDRS:
515 return cec_adap_g_log_addrs(adap, parg);
516
517 case CEC_ADAP_S_LOG_ADDRS:
518 return cec_adap_s_log_addrs(adap, fh, block, parg);
519
520 case CEC_TRANSMIT:
521 return cec_transmit(adap, fh, block, parg);
522
523 case CEC_RECEIVE:
524 return cec_receive(adap, fh, block, parg);
525
526 case CEC_DQEVENT:
527 return cec_dqevent(adap, fh, block, parg);
528
529 case CEC_G_MODE:
530 return cec_g_mode(adap, fh, parg);
531
532 case CEC_S_MODE:
533 return cec_s_mode(adap, fh, parg);
534
535 default:
536 return -ENOTTY;
537 }
538 }
539
/*
 * Open a /dev/cecX node: allocate and initialize the filehandle, enable
 * the adapter when this is the first user (and the adapter needs no HPD
 * and has no valid physical address), then queue the initial
 * state-change (and, for pin adapters, HPD) events.
 */
static int cec_open(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode =
		container_of(inode->i_cdev, struct cec_devnode, cdev);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	/*
	 * Initial events that are automatically sent when the cec device is
	 * opened.
	 */
	struct cec_event ev = {
		.event = CEC_EVENT_STATE_CHANGE,
		.flags = CEC_EVENT_FL_INITIAL_STATE,
	};
	unsigned int i;
	int err;

	if (!fh)
		return -ENOMEM;

	INIT_LIST_HEAD(&fh->msgs);
	INIT_LIST_HEAD(&fh->xfer_list);
	for (i = 0; i < CEC_NUM_EVENTS; i++)
		INIT_LIST_HEAD(&fh->events[i]);
	mutex_init(&fh->lock);
	init_waitqueue_head(&fh->wait);

	fh->mode_initiator = CEC_MODE_INITIATOR;
	fh->adap = adap;

	/* Pin the device so it cannot disappear while this fh lives. */
	err = cec_get_device(devnode);
	if (err) {
		kfree(fh);
		return err;
	}

	mutex_lock(&devnode->lock);
	/* First user and no HPD needed: power the adapter up now. */
	if (list_empty(&devnode->fhs) &&
	    !adap->needs_hpd &&
	    adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
		err = adap->ops->adap_enable(adap, true);
		if (err) {
			/*
			 * NOTE(review): this path frees fh but appears to
			 * skip cec_put_device() after the successful
			 * cec_get_device() above — possible refcount
			 * leak, verify against upstream.
			 */
			mutex_unlock(&devnode->lock);
			kfree(fh);
			return err;
		}
	}
	filp->private_data = fh;

	/* Queue up initial state events */
	ev.state_change.phys_addr = adap->phys_addr;
	ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
	cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
	/* Pin adapters additionally report the current HPD level. */
	if (adap->pin && adap->pin->ops->read_hpd) {
		err = adap->pin->ops->read_hpd(adap);
		if (err >= 0) {
			ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
					 CEC_EVENT_PIN_HPD_LOW;
			cec_queue_event_fh(fh, &ev, 0);
		}
	}
#endif

	list_add(&fh->list, &devnode->fhs);
	mutex_unlock(&devnode->lock);

	return 0;
}
609
/*
 * Override for the release function: drop any exclusive roles held by
 * this filehandle, disable the adapter when the last user goes away,
 * orphan its pending transmits and free its queued messages/events.
 */
static int cec_release(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = filp->private_data;
	unsigned int i;

	/* Release exclusive roles and per-mode counters (cf. cec_s_mode). */
	mutex_lock(&adap->lock);
	if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	if (adap->cec_follower == fh) {
		adap->cec_follower = NULL;
		adap->passthrough = false;
	}
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
		adap->monitor_pin_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
		cec_monitor_all_cnt_dec(adap);
	mutex_unlock(&adap->lock);

	mutex_lock(&devnode->lock);
	list_del(&fh->list);
	/* Last user gone: power the adapter down (mirror of cec_open). */
	if (list_empty(&devnode->fhs) &&
	    !adap->needs_hpd &&
	    adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
		WARN_ON(adap->ops->adap_enable(adap, false));
	}
	mutex_unlock(&devnode->lock);

	/* Unhook pending transmits from this filehandle. */
	mutex_lock(&adap->lock);
	while (!list_empty(&fh->xfer_list)) {
		struct cec_data *data =
			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);

		/* Make them non-blocking and orphan them. */
		data->blocking = false;
		data->fh = NULL;
		list_del(&data->xfer_list);
	}
	mutex_unlock(&adap->lock);
	/* Free messages still queued for CEC_RECEIVE. */
	while (!list_empty(&fh->msgs)) {
		struct cec_msg_entry *entry =
			list_first_entry(&fh->msgs, struct cec_msg_entry, list);

		list_del(&entry->list);
		kfree(entry);
	}
	/*
	 * Free queued events. Only the non-core lists are freed here;
	 * core event entries are presumably embedded in fh or owned by
	 * the core — same split as in cec_dqevent().
	 */
	for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
		while (!list_empty(&fh->events[i])) {
			struct cec_event_entry *entry =
				list_first_entry(&fh->events[i],
						 struct cec_event_entry, list);

			list_del(&entry->list);
			kfree(entry);
		}
	}
	kfree(fh);

	/* Drop the reference taken in cec_open(). */
	cec_put_device(devnode);
	filp->private_data = NULL;
	return 0;
}
676
677 const struct file_operations cec_devnode_fops = {
678 .owner = THIS_MODULE,
679 .open = cec_open,
680 .unlocked_ioctl = cec_ioctl,
681 .release = cec_release,
682 .poll = cec_poll,
683 .llseek = no_llseek,
684 };