Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.15-rc1 667 lines 17 kB view raw
/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include <media/cec-pin.h>
#include "cec-priv.h"
#include "cec-pin-priv.h"

/* Map an open file to the cec_devnode embedded in its adapter */
static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
	struct cec_fh *fh = filp->private_data;

	return &fh->adap->devnode;
}

/* CEC file operations */

/*
 * poll(): POLLOUT when a transmit slot is free on a configured adapter,
 * POLLIN when received messages are queued on this filehandle, POLLPRI
 * when events are queued. All queue state is checked under adap->lock.
 */
static unsigned int cec_poll(struct file *filp,
			     struct poll_table_struct *poll)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	unsigned int res = 0;

	if (!devnode->registered)
		return POLLERR | POLLHUP;
	mutex_lock(&adap->lock);
	if (adap->is_configured &&
	    adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
		res |= POLLOUT | POLLWRNORM;
	if (fh->queued_msgs)
		res |= POLLIN | POLLRDNORM;
	if (fh->total_queued_events)
		res |= POLLPRI;
	poll_wait(filp, &fh->wait, poll);
	mutex_unlock(&adap->lock);
	return res;
}

/*
 * Returns true if this filehandle may NOT use the adapter right now.
 * Must be called with adap->lock held (callers below all hold it).
 */
static bool cec_is_busy(const struct cec_adapter *adap,
			const struct cec_fh *fh)
{
	bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
	bool valid_follower = adap->cec_follower && adap->cec_follower == fh;

	/*
	 * Exclusive initiators and followers can always access the CEC adapter
	 */
	if (valid_initiator || valid_follower)
		return false;
	/*
	 * All others can only access the CEC adapter if there is no
	 * exclusive initiator and they are in INITIATOR mode.
	 */
	return adap->cec_initiator ||
	       fh->mode_initiator == CEC_MODE_NO_INITIATOR;
}

/* CEC_ADAP_G_CAPS: fill in the static adapter capabilities */
static long cec_adap_g_caps(struct cec_adapter *adap,
			    struct cec_caps __user *parg)
{
	struct cec_caps caps = {};

	strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
		sizeof(caps.driver));
	strlcpy(caps.name, adap->name, sizeof(caps.name));
	caps.available_log_addrs = adap->available_log_addrs;
	caps.capabilities = adap->capabilities;
	caps.version = LINUX_VERSION_CODE;
	if (copy_to_user(parg, &caps, sizeof(caps)))
		return -EFAULT;
	return 0;
}

/* CEC_ADAP_G_PHYS_ADDR: read the current physical address under the lock */
static long cec_adap_g_phys_addr(struct cec_adapter *adap,
				 __u16 __user *parg)
{
	u16 phys_addr;

	mutex_lock(&adap->lock);
	phys_addr = adap->phys_addr;
	mutex_unlock(&adap->lock);
	if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
		return -EFAULT;
	return 0;
}

/*
 * CEC_ADAP_S_PHYS_ADDR: validate and set a new physical address.
 * Only allowed if the driver exposes CEC_CAP_PHYS_ADDR and this
 * filehandle is not locked out by an exclusive initiator/follower.
 */
static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, __u16 __user *parg)
{
	u16 phys_addr;
	long err;

	if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
		return -ENOTTY;
	if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
		return -EFAULT;

	/* err is 0 here on success and reused below for the busy check */
	err = cec_phys_addr_validate(phys_addr, NULL, NULL);
	if (err)
		return err;
	mutex_lock(&adap->lock);
	if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		__cec_s_phys_addr(adap, phys_addr, block);
	mutex_unlock(&adap->lock);
	return err;
}

/*
 * CEC_ADAP_G_LOG_ADDRS: copy out the logical address config. While not
 * configured the log_addr[] entries are reported as invalid.
 */
static long cec_adap_g_log_addrs(struct cec_adapter *adap,
				 struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;

	mutex_lock(&adap->lock);
	log_addrs = adap->log_addrs;
	if (!adap->is_configured)
		memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
		       sizeof(log_addrs.log_addr));
	mutex_unlock(&adap->lock);

	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}

/*
 * CEC_ADAP_S_LOG_ADDRS: (re)configure the logical addresses. Rejected
 * while a configuration is already in progress, or when reconfiguring
 * a configured adapter with num_log_addrs != 0, or when busy.
 */
static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;
	long err = -EBUSY;

	if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
		return -ENOTTY;
	if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
		return -EFAULT;
	/* Mask out anything but the flags userspace may set */
	log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
			   CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
			   CEC_LOG_ADDRS_FL_CDC_ONLY;
	mutex_lock(&adap->lock);
	if (!adap->is_configuring &&
	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
	    !cec_is_busy(adap, fh)) {
		err = __cec_s_log_addrs(adap, &log_addrs, block);
		if (!err)
			log_addrs = adap->log_addrs;
	}
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}

/* CEC_TRANSMIT: queue a message for transmission on behalf of this fh */
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
			 bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err = 0;

	if (!(adap->capabilities & CEC_CAP_TRANSMIT))
		return -ENOTTY;
	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;

	/* A CDC-Only device can only send CDC messages */
	if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
	    (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
		return -EINVAL;

	mutex_lock(&adap->lock);
	if (adap->log_addrs.num_log_addrs == 0)
		err = -EPERM;
	else if (adap->is_configuring)
		err = -ENONET;
	else if (!adap->is_configured &&
		 (adap->needs_hpd || msg.msg[0] != 0xf0))
		/* 0xf0 == Unregistered -> TV broadcast is allowed while
		   unconfigured, unless the hardware needs a HPD signal */
		err = -ENONET;
	else if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		err = cec_transmit_msg_fh(adap, &msg, fh, block);
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}

/* Called by CEC_RECEIVE: wait for a message to arrive */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
	u32 timeout = msg->timeout;
	int res;

	do {
		mutex_lock(&fh->lock);
		/* Are there received messages queued up? */
		if (fh->queued_msgs) {
			/* Yes, return the first one */
			struct cec_msg_entry *entry =
				list_first_entry(&fh->msgs,
						 struct cec_msg_entry, list);

			list_del(&entry->list);
			*msg = entry->msg;
			kfree(entry);
			fh->queued_msgs--;
			mutex_unlock(&fh->lock);
			/* restore original timeout value */
			msg->timeout = timeout;
			return 0;
		}

		/* No, return EAGAIN in non-blocking mode or wait */
		mutex_unlock(&fh->lock);

		/* Return when in non-blocking mode */
		if (!block)
			return -EAGAIN;

		if (msg->timeout) {
			/* The user specified a timeout */
			res = wait_event_interruptible_timeout(fh->wait,
							       fh->queued_msgs,
				msecs_to_jiffies(msg->timeout));
			if (res == 0)
				res = -ETIMEDOUT;
			else if (res > 0)
				res = 0;
		} else {
			/* Wait indefinitely */
			res = wait_event_interruptible(fh->wait,
						       fh->queued_msgs);
		}
		/* Exit on error, otherwise loop to get the new message */
	} while (!res);
	return res;
}

/* CEC_RECEIVE: dequeue (or wait for) a received message */
static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err;

	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;

	err = cec_receive_msg(fh, &msg, block);
	if (err)
		return err;
	msg.flags = 0;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}

/*
 * CEC_DQEVENT: dequeue the oldest pending event across all per-type
 * event queues. Core events use statically allocated entries and are
 * not kfree'd; non-core entries (index >= CEC_NUM_CORE_EVENTS) are.
 */
static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_event __user *parg)
{
	struct cec_event_entry *ev = NULL;
	u64 ts = ~0ULL;
	unsigned int i;
	unsigned int ev_idx;
	long err = 0;

	mutex_lock(&fh->lock);
	while (!fh->total_queued_events && block) {
		/* Drop the lock while sleeping for a new event */
		mutex_unlock(&fh->lock);
		err = wait_event_interruptible(fh->wait,
					       fh->total_queued_events);
		if (err)
			return err;
		mutex_lock(&fh->lock);
	}

	/* Find the oldest event */
	for (i = 0; i < CEC_NUM_EVENTS; i++) {
		struct cec_event_entry *entry =
			list_first_entry_or_null(&fh->events[i],
						 struct cec_event_entry, list);

		if (entry && entry->ev.ts <= ts) {
			ev = entry;
			ev_idx = i;
			ts = ev->ev.ts;
		}
	}

	if (!ev) {
		err = -EAGAIN;
		goto unlock;
	}
	list_del(&ev->list);

	if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
		err = -EFAULT;
	if (ev_idx >= CEC_NUM_CORE_EVENTS)
		kfree(ev);
	fh->queued_events[ev_idx]--;
	fh->total_queued_events--;

unlock:
	mutex_unlock(&fh->lock);
	return err;
}

/* CEC_G_MODE: report the combined initiator|follower mode of this fh */
static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode = fh->mode_initiator | fh->mode_follower;

	if (copy_to_user(parg, &mode, sizeof(mode)))
		return -EFAULT;
	return 0;
}

/*
 * CEC_S_MODE: change the initiator/follower mode of this filehandle.
 * Validates the requested mode against the adapter capabilities and
 * the exclusive-ownership state, then updates the follower/monitor
 * refcounts and exclusive pointers under adap->lock.
 */
static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode;
	u8 mode_initiator;
	u8 mode_follower;
	long err = 0;

	if (copy_from_user(&mode, parg, sizeof(mode)))
		return -EFAULT;
	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
		dprintk(1, "%s: invalid mode bits set\n", __func__);
		return -EINVAL;
	}

	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;

	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
	    mode_follower > CEC_MODE_MONITOR_ALL) {
		dprintk(1, "%s: unknown mode\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_ALL &&
	    !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
		dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_PIN &&
	    !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
		dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
		return -EINVAL;
	}

	/* Follower modes should always be able to send CEC messages */
	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
	    mode_follower >= CEC_MODE_FOLLOWER &&
	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		dprintk(1, "%s: cannot transmit\n", __func__);
		return -EINVAL;
	}

	/* Monitor modes require CEC_MODE_NO_INITIATOR */
	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
		dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
			__func__);
		return -EINVAL;
	}

	/* Monitor modes require CAP_NET_ADMIN */
	if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&adap->lock);
	/*
	 * You can't become exclusive follower if someone else already
	 * has that job.
	 */
	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
	    adap->cec_follower && adap->cec_follower != fh)
		err = -EBUSY;
	/*
	 * You can't become exclusive initiator if someone else already
	 * has that job.
	 */
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
	    adap->cec_initiator && adap->cec_initiator != fh)
		err = -EBUSY;

	if (!err) {
		/* Adjust the MONITOR_ALL refcount when entering/leaving it */
		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;

		if (old_mon_all != new_mon_all) {
			if (new_mon_all)
				err = cec_monitor_all_cnt_inc(adap);
			else
				cec_monitor_all_cnt_dec(adap);
		}
	}

	if (err) {
		mutex_unlock(&adap->lock);
		return err;
	}

	/* Update follower/pin-monitor counts for the mode transition */
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
		adap->monitor_pin_cnt--;
	if (mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt++;
	if (mode_follower == CEC_MODE_MONITOR_PIN) {
		/* Send an initial pin-level event to the new pin monitor */
		struct cec_event ev = {
			.flags = CEC_EVENT_FL_INITIAL_STATE,
		};

		ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
						   CEC_EVENT_PIN_CEC_LOW;
		cec_queue_event_fh(fh, &ev, 0);
		adap->monitor_pin_cnt++;
	}
	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		adap->passthrough =
			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
		adap->cec_follower = fh;
	} else if (adap->cec_follower == fh) {
		adap->passthrough = false;
		adap->cec_follower = NULL;
	}
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
		adap->cec_initiator = fh;
	else if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	fh->mode_initiator = mode_initiator;
	fh->mode_follower = mode_follower;
	mutex_unlock(&adap->lock);
	return 0;
}

/* Dispatch the CEC ioctls to their handlers */
static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	bool block = !(filp->f_flags & O_NONBLOCK);
	void __user *parg = (void __user *)arg;

	if (!devnode->registered)
		return -ENODEV;

	switch (cmd) {
	case CEC_ADAP_G_CAPS:
		return cec_adap_g_caps(adap, parg);

	case CEC_ADAP_G_PHYS_ADDR:
		return cec_adap_g_phys_addr(adap, parg);

	case CEC_ADAP_S_PHYS_ADDR:
		return cec_adap_s_phys_addr(adap, fh, block, parg);

	case CEC_ADAP_G_LOG_ADDRS:
		return cec_adap_g_log_addrs(adap, parg);

	case CEC_ADAP_S_LOG_ADDRS:
		return cec_adap_s_log_addrs(adap, fh, block, parg);

	case CEC_TRANSMIT:
		return cec_transmit(adap, fh, block, parg);

	case CEC_RECEIVE:
		return cec_receive(adap, fh, block, parg);

	case CEC_DQEVENT:
		return cec_dqevent(adap, fh, block, parg);

	case CEC_G_MODE:
		return cec_g_mode(adap, fh, parg);

	case CEC_S_MODE:
		return cec_s_mode(adap, fh, parg);

	default:
		return -ENOTTY;
	}
}

/*
 * open(): allocate and initialize a filehandle, enable the adapter on
 * the first open when it needs no HPD and has no physical address yet,
 * and queue the initial state-change (and, for pin adapters, HPD) event.
 */
static int cec_open(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode =
		container_of(inode->i_cdev, struct cec_devnode, cdev);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	/*
	 * Initial events that are automatically sent when the cec device is
	 * opened.
	 */
	struct cec_event ev = {
		.event = CEC_EVENT_STATE_CHANGE,
		.flags = CEC_EVENT_FL_INITIAL_STATE,
	};
	unsigned int i;
	int err;

	if (!fh)
		return -ENOMEM;

	INIT_LIST_HEAD(&fh->msgs);
	INIT_LIST_HEAD(&fh->xfer_list);
	for (i = 0; i < CEC_NUM_EVENTS; i++)
		INIT_LIST_HEAD(&fh->events[i]);
	mutex_init(&fh->lock);
	init_waitqueue_head(&fh->wait);

	fh->mode_initiator = CEC_MODE_INITIATOR;
	fh->adap = adap;

	err = cec_get_device(devnode);
	if (err) {
		kfree(fh);
		return err;
	}

	mutex_lock(&devnode->lock);
	/* First open of an idle, HPD-less adapter: power it up */
	if (list_empty(&devnode->fhs) &&
	    !adap->needs_hpd &&
	    adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
		err = adap->ops->adap_enable(adap, true);
		if (err) {
			mutex_unlock(&devnode->lock);
			kfree(fh);
			return err;
		}
	}
	filp->private_data = fh;

	/* Queue up initial state events */
	ev.state_change.phys_addr = adap->phys_addr;
	ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
	cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
	if (adap->pin && adap->pin->ops->read_hpd) {
		/* read_hpd() returns the current HPD level or a -errno */
		err = adap->pin->ops->read_hpd(adap);
		if (err >= 0) {
			ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
					 CEC_EVENT_PIN_HPD_LOW;
			cec_queue_event_fh(fh, &ev, 0);
		}
	}
#endif

	list_add(&fh->list, &devnode->fhs);
	mutex_unlock(&devnode->lock);

	return 0;
}

/* Override for the release function */
static int cec_release(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = filp->private_data;
	unsigned int i;

	/* Drop any exclusive roles and mode refcounts held by this fh */
	mutex_lock(&adap->lock);
	if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	if (adap->cec_follower == fh) {
		adap->cec_follower = NULL;
		adap->passthrough = false;
	}
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
		adap->monitor_pin_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
		cec_monitor_all_cnt_dec(adap);
	mutex_unlock(&adap->lock);

	mutex_lock(&devnode->lock);
	list_del(&fh->list);
	/* Last close of an idle, HPD-less adapter: power it down */
	if (list_empty(&devnode->fhs) &&
	    !adap->needs_hpd &&
	    adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
		WARN_ON(adap->ops->adap_enable(adap, false));
	}
	mutex_unlock(&devnode->lock);

	/* Unhook pending transmits from this filehandle. */
	mutex_lock(&adap->lock);
	while (!list_empty(&fh->xfer_list)) {
		struct cec_data *data =
			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);

		/* The transmit continues, but no longer reports back to us */
		data->blocking = false;
		data->fh = NULL;
		list_del(&data->xfer_list);
	}
	mutex_unlock(&adap->lock);
	/* Free any still-queued received messages */
	while (!list_empty(&fh->msgs)) {
		struct cec_msg_entry *entry =
			list_first_entry(&fh->msgs, struct cec_msg_entry, list);

		list_del(&entry->list);
		kfree(entry);
	}
	/*
	 * Only the non-core event entries are heap-allocated; the core
	 * event entries live inside the fh itself and are freed with it.
	 */
	for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
		while (!list_empty(&fh->events[i])) {
			struct cec_event_entry *entry =
				list_first_entry(&fh->events[i],
						 struct cec_event_entry, list);

			list_del(&entry->list);
			kfree(entry);
		}
	}
	kfree(fh);

	cec_put_device(devnode);
	filp->private_data = NULL;
	return 0;
}

const struct file_operations cec_devnode_fops = {
	.owner = THIS_MODULE,
	.open = cec_open,
	.unlocked_ioctl = cec_ioctl,
	.release = cec_release,
	.poll = cec_poll,
	.llseek = no_llseek,
};