/*
 * firmware_class.c - Multi purpose firmware loading support
 *
 * Copyright (c) 2003 Manuel Estrada Sainz
 *
 * Please see Documentation/firmware_class/ for more information.
 *
 */

#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>

#include <generated/utsrelease.h>

#include "base.h"

MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");

/* Builtin firmware support */

#ifdef CONFIG_FW_LOADER

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (strcmp(name, b_fw->name) == 0) {
			fw->size = b_fw->size;
			fw->data = b_fw->data;
			return true;
		}
	}

	return false;
}

static bool fw_is_builtin_firmware(const struct firmware *fw)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
		if (fw->data == b_fw->data)
			return true;

	return false;
}

#else /* Module case - no builtin firmware support */

static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
	return false;
}

static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
#endif

enum {
	FW_STATUS_LOADING,
	FW_STATUS_DONE,
	FW_STATUS_ABORT,
};

enum fw_buf_fmt {
	VMALLOC_BUF,	/* used in direct loading */
	PAGE_BUF,	/* used in loading via userspace */
};

static int loading_timeout = 60;	/* In seconds */

static inline long firmware_loading_timeout(void)
{
	return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
}

struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;

#ifdef CONFIG_PM_SLEEP
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	struct delayed_work work;

	struct notifier_block pm_notify;
#endif
};

struct firmware_buf {
	struct kref ref;
	struct list_head list;
	struct completion completion;
	struct firmware_cache *fwc;
	unsigned long status;
	enum fw_buf_fmt fmt;
	void *data;
	size_t size;
	struct page **pages;
	int nr_pages;
	int page_array_size;
	char fw_id[];
};

struct fw_cache_entry {
	struct list_head list;
	char name[];
};

struct firmware_priv {
	struct delayed_work timeout_work;
	bool nowait;
	struct device dev;
	struct firmware_buf *buf;
	struct firmware *fw;
};

struct fw_name_devm {
	unsigned long magic;
	char name[];
};

#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)

#define FW_LOADER_NO_CACHE	0
#define FW_LOADER_START_CACHE	1

static int fw_cache_piggyback_on_request(const char *name);

/* fw_lock could be moved to 'struct firmware_priv' but since it is just
 * guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);

static struct firmware_cache fw_cache;

static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
					      struct firmware_cache *fwc)
{
	struct firmware_buf *buf;

	buf = kzalloc(sizeof(*buf) + strlen(fw_name) + 1, GFP_ATOMIC);

	if (!buf)
		return buf;

	kref_init(&buf->ref);
	strcpy(buf->fw_id, fw_name);
	buf->fwc = fwc;
	init_completion(&buf->completion);
	buf->fmt = VMALLOC_BUF;

	pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);

	return buf;
}

static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
{
	struct firmware_buf *tmp;
	struct firmware_cache *fwc = &fw_cache;

	list_for_each_entry(tmp, &fwc->head, list)
		if (!strcmp(tmp->fw_id, fw_name))
			return tmp;
	return NULL;
}

static int fw_lookup_and_allocate_buf(const char *fw_name,
				      struct firmware_cache *fwc,
				      struct firmware_buf **buf)
{
	struct firmware_buf *tmp;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	if (tmp) {
		kref_get(&tmp->ref);
		spin_unlock(&fwc->lock);
		*buf = tmp;
		return 1;
	}
	tmp = __allocate_fw_buf(fw_name, fwc);
	if (tmp)
		list_add(&tmp->list, &fwc->head);
	spin_unlock(&fwc->lock);

	*buf = tmp;

	return tmp ?
		0 : -ENOMEM;
}

static struct firmware_buf *fw_lookup_buf(const char *fw_name)
{
	struct firmware_buf *tmp;
	struct firmware_cache *fwc = &fw_cache;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	spin_unlock(&fwc->lock);

	return tmp;
}

static void __fw_free_buf(struct kref *ref)
{
	struct firmware_buf *buf = to_fwbuf(ref);
	struct firmware_cache *fwc = buf->fwc;
	int i;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);

	list_del(&buf->list);
	spin_unlock(&fwc->lock);

	if (buf->fmt == PAGE_BUF) {
		vunmap(buf->data);
		for (i = 0; i < buf->nr_pages; i++)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
	} else
		vfree(buf->data);
	kfree(buf);
}

static void fw_free_buf(struct firmware_buf *buf)
{
	struct firmware_cache *fwc = buf->fwc;
	spin_lock(&fwc->lock);
	if (!kref_put(&buf->ref, __fw_free_buf))
		spin_unlock(&fwc->lock);
}

/* direct firmware loading support */
static char fw_path_para[256];
static const char * const fw_path[] = {
	fw_path_para,
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};

/*
 * Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on the
 * kernel command line, because firmware_class is generally built into the
 * kernel instead of being a module.
 */
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");

/* Don't inline this: 'struct kstat' is biggish */
static noinline_for_stack long fw_file_size(struct file *file)
{
	struct kstat st;
	if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
		return -1;
	if (!S_ISREG(st.mode))
		return -1;
	if (st.size != (long)st.size)
		return -1;
	return st.size;
}

static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
{
	long size;
	char *buf;

	size = fw_file_size(file);
	if (size <= 0)
		return false;
	buf = vmalloc(size);
	if (!buf)
		return false;
	if (kernel_read(file, 0, buf, size) != size) {
		vfree(buf);
		return false;
	}
	fw_buf->data = buf;
	fw_buf->size = size;
	return true;
}

static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
{
	int i;
	bool success = false;
	char *path = __getname();

	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		struct file *file;

		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);

		file = filp_open(path, O_RDONLY, 0);
		if (IS_ERR(file))
			continue;
		success = fw_read_file_contents(file, buf);
		fput(file);
		if (success)
			break;
	}
	__putname(path);
	return success;
}

static struct firmware_priv *to_firmware_priv(struct device *dev)
{
	return container_of(dev, struct firmware_priv, dev);
}

static void fw_load_abort(struct firmware_priv *fw_priv)
{
	struct firmware_buf *buf = fw_priv->buf;

	set_bit(FW_STATUS_ABORT, &buf->status);
	complete_all(&buf->completion);
}

static ssize_t firmware_timeout_show(struct class *class,
				     struct class_attribute *attr,
				     char *buf)
{
	return sprintf(buf, "%d\n",
		       loading_timeout);
}

/**
 * firmware_timeout_store - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware.  Once
 * this expires an error will be returned to the driver and no
 * firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 **/
static ssize_t firmware_timeout_store(struct class *class,
				      struct class_attribute *attr,
				      const char *buf, size_t count)
{
	loading_timeout = simple_strtol(buf, NULL, 10);
	if (loading_timeout < 0)
		loading_timeout = 0;

	return count;
}

static struct class_attribute firmware_class_attrs[] = {
	__ATTR(timeout, S_IWUSR | S_IRUGO,
	       firmware_timeout_show, firmware_timeout_store),
	__ATTR_NULL
};

static void fw_dev_release(struct device *dev)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);

	kfree(fw_priv);

	module_put(THIS_MODULE);
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);

	if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
		return -ENOMEM;

	return 0;
}

static struct class firmware_class = {
	.name		= "firmware",
	.class_attrs	= firmware_class_attrs,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};

static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);

	return sprintf(buf, "%d\n", loading);
}

/* firmware holds the ownership of pages */
static void firmware_free_data(const struct firmware *fw)
{
	/* Loaded directly? */
	if (!fw->priv) {
		vfree(fw->data);
		return;
	}
	fw_free_buf(fw->priv);
}

/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif

/* one pages buffer should be mapped/unmapped only once */
static int fw_map_pages_buf(struct firmware_buf *buf)
{
	if (buf->fmt != PAGE_BUF)
		return 0;

	if (buf->data)
		vunmap(buf->data);
	buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
	if (!buf->data)
		return -ENOMEM;
	return 0;
}

/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *	 1: Start a load, discarding any previous partial load.
 *	 0: Conclude the load and hand the data to the driver code.
 *	-1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *fw_buf = fw_priv->buf;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);

	if (!fw_buf)
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
			for (i = 0; i < fw_buf->nr_pages; i++)
				__free_page(fw_buf->pages[i]);
			kfree(fw_buf->pages);
			fw_buf->pages = NULL;
			fw_buf->page_array_size = 0;
			fw_buf->nr_pages = 0;
			set_bit(FW_STATUS_LOADING, &fw_buf->status);
		}
		break;
	case 0:
		if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
			set_bit(FW_STATUS_DONE, &fw_buf->status);
			clear_bit(FW_STATUS_LOADING, &fw_buf->status);

			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 */
			fw_map_pages_buf(fw_buf);
			complete_all(&fw_buf->completion);
			break;
		}
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_priv);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return count;
}

static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);

static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > buf->size) {
		ret_count = 0;
		goto out;
	}
	if (count > buf->size - offset)
		count = buf->size - offset;

	ret_count = count;

	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(buffer, page_data + page_ofs, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
out:
	mutex_unlock(&fw_lock);
	return ret_count;
}

static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
	struct firmware_buf *buf = fw_priv->buf;
	int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;

	/* If the array of pages is too small, grow it...
	 */
	if (buf->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 buf->page_array_size * 2);
		struct page **new_pages;

		new_pages = kmalloc(new_array_size * sizeof(void *),
				    GFP_KERNEL);
		if (!new_pages) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		memcpy(new_pages, buf->pages,
		       buf->page_array_size * sizeof(void *));
		memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
		       (new_array_size - buf->page_array_size));
		kfree(buf->pages);
		buf->pages = new_pages;
		buf->page_array_size = new_array_size;
	}

	while (buf->nr_pages < pages_needed) {
		buf->pages[buf->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!buf->pages[buf->nr_pages]) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		buf->nr_pages++;
	}
	return 0;
}

/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		retval = -ENODEV;
		goto out;
	}

	retval = fw_realloc_buffer(fw_priv, offset + count);
	if (retval)
		goto out;

	retval = count;

	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE - 1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}

	buf->size = max_t(size_t, offset, buf->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}

static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

static void firmware_class_timeout_work(struct work_struct *work)
{
	struct firmware_priv *fw_priv = container_of(work,
			struct firmware_priv, timeout_work.work);

	mutex_lock(&fw_lock);
	if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
		mutex_unlock(&fw_lock);
		return;
	}
	fw_load_abort(fw_priv);
	mutex_unlock(&fw_lock);
}

static struct firmware_priv *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, bool uevent, bool nowait)
{
	struct firmware_priv *fw_priv;
	struct device *f_dev;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
	if (!fw_priv) {
		dev_err(device, "%s: kmalloc failed\n", __func__);
		fw_priv = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_priv->nowait = nowait;
	fw_priv->fw = firmware;
	INIT_DELAYED_WORK(&fw_priv->timeout_work,
			  firmware_class_timeout_work);

	f_dev = &fw_priv->dev;

	device_initialize(f_dev);
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
exit:
	return fw_priv;
}

/* store the pages buffer into firmware from buf */
static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
{
	fw->priv = buf;
	fw->pages = buf->pages;
	fw->size = buf->size;
	fw->data = buf->data;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);
}

#ifdef CONFIG_PM_SLEEP
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
			 __func__, fwn->name, res);
}

static int fw_devm_match(struct device *dev, void *res,
			 void *match_data)
{
	struct fw_name_devm *fwn = res;

	return (fwn->magic == (unsigned long)&fw_cache) &&
		!strcmp(fwn->name, match_data);
}

static struct fw_name_devm *fw_find_devm_name(struct device *dev,
					      const char *name)
{
	struct fw_name_devm *fwn;

	fwn = devres_find(dev, fw_name_devm_release,
			  fw_devm_match, (void *)name);
	return fwn;
}

/* add firmware name into devres list */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	fwn = fw_find_devm_name(dev, name);
	if (fwn)
		return 1;

	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
			   strlen(name) + 1, GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;

	fwn->magic = (unsigned long)&fw_cache;
	strcpy(fwn->name, name);
	devres_add(dev, fwn);

	return 0;
}
#else
static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
#endif

static void _request_firmware_cleanup(const struct firmware **firmware_p)
{
	release_firmware(*firmware_p);
	*firmware_p = NULL;
}

static struct firmware_priv *
_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
			  struct device *device, bool uevent, bool nowait)
{
	struct firmware *firmware;
	struct firmware_priv *fw_priv = NULL;
	struct firmware_buf *buf;
	int ret;

	if (!firmware_p)
		return ERR_PTR(-EINVAL);

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	if (fw_get_builtin_firmware(firmware, name)) {
		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
		return NULL;
	}

	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
	if (!ret)
		fw_priv = fw_create_instance(firmware, name, device,
					     uevent, nowait);

	if (IS_ERR(fw_priv) || ret < 0) {
		kfree(firmware);
		*firmware_p = NULL;
		return ERR_PTR(-ENOMEM);
	} else if (fw_priv) {
		fw_priv->buf = buf;

		/*
		 * bind with 'buf' now to avoid warning in failure path
		 * of requesting firmware.
		 */
		firmware->priv = buf;
		return fw_priv;
	}

	/* share the cached buf, which is in progress or completed */
 check_status:
	mutex_lock(&fw_lock);
	if (test_bit(FW_STATUS_ABORT, &buf->status)) {
		fw_priv = ERR_PTR(-ENOENT);
		firmware->priv = buf;
		_request_firmware_cleanup(firmware_p);
		goto exit;
	} else if (test_bit(FW_STATUS_DONE, &buf->status)) {
		fw_priv = NULL;
		fw_set_page_data(buf, firmware);
		goto exit;
	}
	mutex_unlock(&fw_lock);
	wait_for_completion(&buf->completion);
	goto check_status;

exit:
	mutex_unlock(&fw_lock);
	return fw_priv;
}

static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
				  long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_priv->dev;
	struct firmware_buf *buf = fw_priv->buf;
	struct firmware_cache *fwc = &fw_cache;
	int direct_load = 0;

	/* try direct loading from fs first */
	if (fw_get_filesystem_firmware(buf)) {
		dev_dbg(f_dev->parent, "firmware: direct-loading"
			" firmware %s\n", buf->fw_id);

		mutex_lock(&fw_lock);
		set_bit(FW_STATUS_DONE, &buf->status);
		mutex_unlock(&fw_lock);
		complete_all(&buf->completion);
		direct_load = 1;
		goto handle_fw;
	}

	/* fall back on userspace loading */
	buf->fmt = PAGE_BUF;

	dev_set_uevent_suppress(f_dev, true);

	/* Need to pin this module until class device is destroyed */
	__module_get(THIS_MODULE);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	retval = device_create_bin_file(f_dev, &firmware_attr_data);
	if (retval) {
		dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
		goto err_del_dev;
	}

	retval = device_create_file(f_dev, &dev_attr_loading);
	if (retval) {
		dev_err(f_dev, "%s: device_create_file failed\n", __func__);
		goto err_del_bin_attr;
	}

	if (uevent) {
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
		if (timeout != MAX_SCHEDULE_TIMEOUT)
			schedule_delayed_work(&fw_priv->timeout_work, timeout);

		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
	}

	wait_for_completion(&buf->completion);

	cancel_delayed_work_sync(&fw_priv->timeout_work);

handle_fw:
	mutex_lock(&fw_lock);
	if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
		retval = -ENOENT;

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * f_dev->parent may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	if (!retval && f_dev->parent)
		fw_add_devm_name(f_dev->parent, buf->fw_id);

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (!retval && fwc->state == FW_LOADER_START_CACHE) {
		if (fw_cache_piggyback_on_request(buf->fw_id))
			kref_get(&buf->ref);
	}

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(buf, fw_priv->fw);

	fw_priv->buf = NULL;
	mutex_unlock(&fw_lock);

	if (direct_load)
		goto err_put_dev;

	device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
	device_remove_bin_file(f_dev, &firmware_attr_data);
err_del_dev:
	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}

/**
 * request_firmware: - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * @firmware_p will be used to return a firmware image by the name
 * of @name for device @device.
 *
 * Should be called from user context where sleeping is allowed.
 *
 * @name will be used as $FIRMWARE in the uevent environment and
 * should be distinctive enough not to be confused with any other
 * firmware image for this or any other device.
 *
 * Caller must hold the reference count of @device.
 *
 * The function can be called safely inside device's suspend and
 * resume callback.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
		 struct device *device)
{
	struct firmware_priv *fw_priv;
	int ret;

	fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
					    false);
	if (IS_ERR_OR_NULL(fw_priv))
		return PTR_RET(fw_priv);

	ret = usermodehelper_read_trylock();
	if (WARN_ON(ret)) {
		dev_err(device, "firmware: %s will not be loaded\n", name);
	} else {
		ret = _request_firmware_load(fw_priv, true,
					     firmware_loading_timeout());
		usermodehelper_read_unlock();
	}
	if (ret)
		_request_firmware_cleanup(firmware_p);

	return ret;
}

/**
 * release_firmware: - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
void release_firmware(const struct firmware *fw)
{
	if (fw) {
		if (!fw_is_builtin_firmware(fw))
			firmware_free_data(fw);
		kfree(fw);
	}
}

/* Async support */
struct firmware_work {
	struct work_struct work;
	struct module *module;
	const char *name;
	struct device *device;
	void *context;
	void (*cont)(const struct firmware *fw, void *context);
	bool uevent;
};

static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;
	struct firmware_priv *fw_priv;
	long timeout;
	int ret;

	fw_work = container_of(work, struct firmware_work, work);
	fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
					    fw_work->uevent, true);
	if (IS_ERR_OR_NULL(fw_priv)) {
		ret = PTR_RET(fw_priv);
		goto out;
	}

	timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
	if (timeout) {
		ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
		usermodehelper_read_unlock();
	} else {
		dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
			fw_work->name);
		ret = -EAGAIN;
	}
	if (ret)
		_request_firmware_cleanup(&fw);

 out:
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device);

	module_put(fw_work->module);
	kfree(fw_work);
}

/**
 * request_firmware_nowait - asynchronous version of request_firmware
 * @module: module requesting the firmware
 * @uevent: sends uevent to copy the firmware image if this flag
 *	is non-zero else the firmware copy must be done manually.
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 * @gfp: allocation flags
 * @context: will be passed over to @cont, and
 *	@fw may be %NULL if firmware request fails.
 * @cont: function will be called asynchronously when the firmware
 *	request is over.
 *
 * Caller must hold the reference count of @device.
 *
 * Asynchronous variant of request_firmware() for user contexts:
 *	- sleeps for as short a period as possible, since it may
 *	increase kernel boot time of built-in device drivers
 *	requesting firmware in their ->probe() methods, if
 *	@gfp is GFP_KERNEL.
 *
 *	- can't sleep at all if @gfp is GFP_ATOMIC.
 **/
int
request_firmware_nowait(
	struct module *module, bool uevent,
	const char *name, struct device *device, gfp_t gfp, void *context,
	void (*cont)(const struct firmware *fw, void *context))
{
	struct firmware_work *fw_work;

	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
	if (!fw_work)
		return -ENOMEM;

	fw_work->module = module;
	fw_work->name = name;
	fw_work->device = device;
	fw_work->context = context;
	fw_work->cont = cont;
	fw_work->uevent = uevent;

	if (!try_module_get(module)) {
		kfree(fw_work);
		return -EFAULT;
	}

	get_device(fw_work->device);
	INIT_WORK(&fw_work->work, request_firmware_work_func);
	schedule_work(&fw_work->work);
	return 0;
}

/**
 * cache_firmware - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when the
 * system isn't ready for them to request the firmware image from
 * userspace.  Once it returns successfully, the driver can use
 * request_firmware or its nowait version to get the cached firmware
 * without interacting with userspace.
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 */
int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}

/**
 * uncache_firmware - remove one cached firmware image
 * @fw_name: the firmware image name
 *
 * Uncache one firmware image which has been cached successfully
 * before.
 *
 * Return 0 if the firmware cache has been removed successfully
 * Return !0 otherwise
 *
 */
int uncache_firmware(const char *fw_name)
{
	struct firmware_buf *buf;
	struct firmware fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	if (fw_get_builtin_firmware(&fw, fw_name))
		return 0;

	buf = fw_lookup_buf(fw_name);
	if (buf) {
		fw_free_buf(buf);
		return 0;
	}

	return -EINVAL;
}

#ifdef CONFIG_PM_SLEEP
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);

static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
	struct fw_cache_entry *fce;

	fce = kzalloc(sizeof(*fce) + strlen(name) + 1, GFP_ATOMIC);
	if (!fce)
		goto exit;

	strcpy(fce->name, name);
exit:
	return fce;
}

static int __fw_entry_found(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	list_for_each_entry(fce, &fwc->fw_names, list) {
		if (!strcmp(fce->name, name))
			return 1;
	}
	return 0;
}

static int fw_cache_piggyback_on_request(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;
	int ret = 0;

	spin_lock(&fwc->name_lock);
	if (__fw_entry_found(name))
		goto found;

	fce = alloc_fw_cache_entry(name);
	if (fce) {
		ret = 1;
		list_add(&fce->list, &fwc->fw_names);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
	return ret;
}

static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree(fce);
}

static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}

/* called with dev->devres_lock held */
static void dev_create_fw_entry(struct device *dev, void *res,
				void *data)
{
	struct fw_name_devm *fwn = res;
	const char *fw_name = fwn->name;
	struct list_head *head = data;
	struct fw_cache_entry *fce;

	fce = alloc_fw_cache_entry(fw_name);
	if (fce)
		list_add(&fce->list, head);
}

static int devm_name_match(struct device *dev, void *res,
			   void *match_data)
{
	struct fw_name_devm *fwn = res;
	return (fwn->magic == (unsigned long)match_data);
}

static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}

static
void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}

/**
 * device_cache_fw_images - cache devices' firmware
 *
 * If one device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded into the
 * device's devres link list, so device_cache_fw_images can call
 * cache_firmware() to cache these firmware images for the device,
 * then the device driver can load its firmware easily at a time
 * when the system is not ready to complete loading firmware.
 */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	int old_timeout;
	DEFINE_WAIT(wait);

	pr_debug("%s\n", __func__);

	/* cancel uncache work */
	cancel_delayed_work_sync(&fwc->work);

	/*
	 * use small loading timeout for caching devices' firmware
	 * because all these firmware images have been loaded
	 * successfully at least once, and the system is ready for
	 * completing firmware loading now.  The maximum size of
	 * firmware in current distributions is about 2M bytes,
	 * so 10 secs should be enough.
	 */
	old_timeout = loading_timeout;
	loading_timeout = 10;

	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices */
	async_synchronize_full_domain(&fw_cache_domain);

	loading_timeout = old_timeout;
}

/**
 * device_uncache_fw_images - uncache devices' firmware
 *
 * Uncache all firmware images which have been cached successfully
 * by device_cache_fw_images() earlier.
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}

static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}

/**
 * device_uncache_fw_images_delay - uncache devices' firmware after a delay
 * @delay: number of milliseconds to wait before uncaching device firmware
 *
 * Uncache all devices' firmware which has been cached successfully
 * by device_cache_fw_images() after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	schedule_delayed_work(&fw_cache.work,
			      msecs_to_jiffies(delay));
}

static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}

/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}

static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
#else
static int fw_cache_piggyback_on_request(const char *name)
{
	return 0;
}
#endif

static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;

#ifdef CONFIG_PM_SLEEP
	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	register_pm_notifier(&fw_cache.pm_notify);

	register_syscore_ops(&fw_syscore_ops);
#endif
}

static int __init firmware_class_init(void)
{
	fw_cache_init();
	return class_register(&firmware_class);
}

static void __exit firmware_class_exit(void)
{
#ifdef CONFIG_PM_SLEEP
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
#endif
	class_unregister(&firmware_class);
}

fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);

EXPORT_SYMBOL(release_firmware);
EXPORT_SYMBOL(request_firmware);
EXPORT_SYMBOL(request_firmware_nowait);
EXPORT_SYMBOL_GPL(cache_firmware);
EXPORT_SYMBOL_GPL(uncache_firmware);
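
/*
 * Illustrative usage sketch (guarded out of the build): a minimal example of
 * how a driver might consume the request_firmware()/release_firmware() and
 * request_firmware_nowait() interfaces implemented above.  The device
 * pointer, the "example/fw_image.bin" name and the example_* helpers are
 * hypothetical and only shown for documentation purposes.
 */
#if 0
static int example_load_fw_sync(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	/* Sleeps until the image is found (built-in, on disk, or via uevent). */
	ret = request_firmware(&fw, "example/fw_image.bin", dev);
	if (ret) {
		dev_err(dev, "firmware example/fw_image.bin not available: %d\n",
			ret);
		return ret;
	}

	/* fw->data points at fw->size bytes; program the hardware here. */

	/* The requester owns the image and must release it. */
	release_firmware(fw);
	return 0;
}

static void example_fw_cont(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	/* fw is NULL if the asynchronous request failed or timed out. */
	if (!fw) {
		dev_err(dev, "firmware request failed\n");
		return;
	}

	/* Consume fw->data / fw->size, then drop the reference. */
	release_firmware(fw);
}

static int example_load_fw_async(struct device *dev)
{
	/* Returns immediately; example_fw_cont() runs once the load finishes. */
	return request_firmware_nowait(THIS_MODULE, true, "example/fw_image.bin",
				       dev, GFP_KERNEL, dev, example_fw_cont);
}
#endif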