/* drivers/nvmem/core.c at v5.0-rc8 (1576 lines, 36 kB) */
// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

/*
 * Runtime state for one registered nvmem provider.  Lifetime is
 * refcounted; final teardown happens in nvmem_release() once the last
 * reference is dropped.
 */
struct nvmem_device {
	struct module *owner;
	struct device dev;
	int stride;		/* minimum access alignment, in bytes */
	int word_size;		/* smallest transfer unit, in bytes */
	int id;			/* ida-allocated instance number */
	struct kref refcnt;
	size_t size;		/* total device size, in bytes */
	bool read_only;
	int flags;		/* FLAG_* bits below */
	enum nvmem_type type;
	struct bin_attribute eeprom;	/* legacy "eeprom" sysfs file */
	struct device *base_dev;	/* parent of the legacy file */
	struct list_head cells;		/* cells belonging to this device */
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	void *priv;		/* provider-private context */
};

/* set once the legacy "eeprom" sysfs file has been created */
#define FLAG_COMPAT BIT(0)

/* One region (possibly a sub-byte bit-field) inside an nvmem device. */
struct nvmem_cell {
	const char *name;
	int offset;		/* byte offset within the device */
	int bytes;		/* size in bytes (covers nbits when set) */
	int bit_offset;		/* first valid bit within the first byte */
	int nbits;		/* width in bits; 0 means whole bytes */
	struct device_node *np;	/* DT node the cell came from, if any */
	struct nvmem_device *nvmem;
	struct list_head node;	/* link in nvmem->cells */
};

/* protects per-device cell lists */
static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

/* protects nvmem_cell_tables */
static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

/* protects nvmem_lookup_list */
static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

/* human-readable names for the sysfs "type" attribute */
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* separate lockdep class for the legacy "eeprom" attribute */
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

/* Read @bytes at @offset via the provider callback; -EINVAL if absent. */
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

/* Write @bytes at @offset via the provider callback; -EINVAL if absent. */
static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

/* sysfs "type": report the device type as a string */
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

/* sysfs read handler shared by the "nvmem" and legacy "eeprom" files */
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	/* the legacy file stashes the nvmem device in attr->private */
	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	/* only whole words can be transferred */
	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

/* sysfs write handler shared by the "nvmem" and legacy "eeprom" files */
static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	/* only whole words can be transferred */
	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs = nvmem_bin_rw_attributes,
	.attrs = nvmem_attrs,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0444,
	},
	.read = bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs = nvmem_bin_ro_attributes,
	.attrs = nvmem_attrs,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0600,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
	&bin_attr_rw_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
	.bin_attrs = nvmem_bin_rw_root_attributes,
	.attrs = nvmem_attrs,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
	&nvmem_bin_rw_root_group,
	NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0400,
	},
	.read = bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
	&bin_attr_ro_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
	.bin_attrs = nvmem_bin_ro_root_attributes,
	.attrs = nvmem_attrs,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
	&nvmem_bin_ro_root_group,
	NULL,
};

/* device release callback: runs when the embedded struct device dies */
static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

/* bus_find_device() match callback: compare against a device_node */
static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

/* Find a registered nvmem device by DT node; NULL if not (yet) registered. */
static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

/* Find a registered nvmem device by name; NULL if not (yet) registered. */
static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

/* Unlink and free one cell, notifying listeners first. */
static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

/* Link a cell into its device's list and notify listeners. */
static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

/*
 * Fill a struct nvmem_cell from consumer-supplied cell info.  The info's
 * name pointer is used as-is (not copied).  Fails if the cell offset is
 * not aligned to the device stride.
 */
static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	/* a bit-field cell needs enough bytes to cover offset + width */
	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
384 */ 385static int nvmem_add_cells(struct nvmem_device *nvmem, 386 const struct nvmem_cell_info *info, 387 int ncells) 388{ 389 struct nvmem_cell **cells; 390 int i, rval; 391 392 cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL); 393 if (!cells) 394 return -ENOMEM; 395 396 for (i = 0; i < ncells; i++) { 397 cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL); 398 if (!cells[i]) { 399 rval = -ENOMEM; 400 goto err; 401 } 402 403 rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]); 404 if (rval) { 405 kfree(cells[i]); 406 goto err; 407 } 408 409 nvmem_cell_add(cells[i]); 410 } 411 412 /* remove tmp array */ 413 kfree(cells); 414 415 return 0; 416err: 417 while (i--) 418 nvmem_cell_drop(cells[i]); 419 420 kfree(cells); 421 422 return rval; 423} 424 425/* 426 * nvmem_setup_compat() - Create an additional binary entry in 427 * drivers sys directory, to be backwards compatible with the older 428 * drivers/misc/eeprom drivers. 429 */ 430static int nvmem_setup_compat(struct nvmem_device *nvmem, 431 const struct nvmem_config *config) 432{ 433 int rval; 434 435 if (!config->base_dev) 436 return -EINVAL; 437 438 if (nvmem->read_only) 439 nvmem->eeprom = bin_attr_ro_root_nvmem; 440 else 441 nvmem->eeprom = bin_attr_rw_root_nvmem; 442 nvmem->eeprom.attr.name = "eeprom"; 443 nvmem->eeprom.size = nvmem->size; 444#ifdef CONFIG_DEBUG_LOCK_ALLOC 445 nvmem->eeprom.attr.key = &eeprom_lock_key; 446#endif 447 nvmem->eeprom.private = &nvmem->dev; 448 nvmem->base_dev = config->base_dev; 449 450 rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom); 451 if (rval) { 452 dev_err(&nvmem->dev, 453 "Failed to create eeprom binary file %d\n", rval); 454 return rval; 455 } 456 457 nvmem->flags |= FLAG_COMPAT; 458 459 return 0; 460} 461 462/** 463 * nvmem_register_notifier() - Register a notifier block for nvmem events. 464 * 465 * @nb: notifier block to be called on nvmem events. 466 * 467 * Return: 0 on success, negative error number on failure. 
468 */ 469int nvmem_register_notifier(struct notifier_block *nb) 470{ 471 return blocking_notifier_chain_register(&nvmem_notifier, nb); 472} 473EXPORT_SYMBOL_GPL(nvmem_register_notifier); 474 475/** 476 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events. 477 * 478 * @nb: notifier block to be unregistered. 479 * 480 * Return: 0 on success, negative error number on failure. 481 */ 482int nvmem_unregister_notifier(struct notifier_block *nb) 483{ 484 return blocking_notifier_chain_unregister(&nvmem_notifier, nb); 485} 486EXPORT_SYMBOL_GPL(nvmem_unregister_notifier); 487 488static int nvmem_add_cells_from_table(struct nvmem_device *nvmem) 489{ 490 const struct nvmem_cell_info *info; 491 struct nvmem_cell_table *table; 492 struct nvmem_cell *cell; 493 int rval = 0, i; 494 495 mutex_lock(&nvmem_cell_mutex); 496 list_for_each_entry(table, &nvmem_cell_tables, node) { 497 if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) { 498 for (i = 0; i < table->ncells; i++) { 499 info = &table->cells[i]; 500 501 cell = kzalloc(sizeof(*cell), GFP_KERNEL); 502 if (!cell) { 503 rval = -ENOMEM; 504 goto out; 505 } 506 507 rval = nvmem_cell_info_to_nvmem_cell(nvmem, 508 info, 509 cell); 510 if (rval) { 511 kfree(cell); 512 goto out; 513 } 514 515 nvmem_cell_add(cell); 516 } 517 } 518 } 519 520out: 521 mutex_unlock(&nvmem_cell_mutex); 522 return rval; 523} 524 525static struct nvmem_cell * 526nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id) 527{ 528 struct nvmem_cell *cell = NULL; 529 530 mutex_lock(&nvmem_mutex); 531 list_for_each_entry(cell, &nvmem->cells, node) { 532 if (strcmp(cell_id, cell->name) == 0) 533 break; 534 } 535 mutex_unlock(&nvmem_mutex); 536 537 return cell; 538} 539 540static int nvmem_add_cells_from_of(struct nvmem_device *nvmem) 541{ 542 struct device_node *parent, *child; 543 struct device *dev = &nvmem->dev; 544 struct nvmem_cell *cell; 545 const __be32 *addr; 546 int len; 547 548 parent = dev->of_node; 549 550 
for_each_child_of_node(parent, child) { 551 addr = of_get_property(child, "reg", &len); 552 if (!addr || (len < 2 * sizeof(u32))) { 553 dev_err(dev, "nvmem: invalid reg on %pOF\n", child); 554 return -EINVAL; 555 } 556 557 cell = kzalloc(sizeof(*cell), GFP_KERNEL); 558 if (!cell) 559 return -ENOMEM; 560 561 cell->nvmem = nvmem; 562 cell->np = of_node_get(child); 563 cell->offset = be32_to_cpup(addr++); 564 cell->bytes = be32_to_cpup(addr); 565 cell->name = kasprintf(GFP_KERNEL, "%pOFn", child); 566 567 addr = of_get_property(child, "bits", &len); 568 if (addr && len == (2 * sizeof(u32))) { 569 cell->bit_offset = be32_to_cpup(addr++); 570 cell->nbits = be32_to_cpup(addr); 571 } 572 573 if (cell->nbits) 574 cell->bytes = DIV_ROUND_UP( 575 cell->nbits + cell->bit_offset, 576 BITS_PER_BYTE); 577 578 if (!IS_ALIGNED(cell->offset, nvmem->stride)) { 579 dev_err(dev, "cell %s unaligned to nvmem stride %d\n", 580 cell->name, nvmem->stride); 581 /* Cells already added will be freed later. */ 582 kfree(cell->name); 583 kfree(cell); 584 return -EINVAL; 585 } 586 587 nvmem_cell_add(cell); 588 } 589 590 return 0; 591} 592 593/** 594 * nvmem_register() - Register a nvmem device for given nvmem_config. 595 * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem 596 * 597 * @config: nvmem device configuration with which nvmem device is created. 598 * 599 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device 600 * on success. 
601 */ 602 603struct nvmem_device *nvmem_register(const struct nvmem_config *config) 604{ 605 struct nvmem_device *nvmem; 606 int rval; 607 608 if (!config->dev) 609 return ERR_PTR(-EINVAL); 610 611 nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL); 612 if (!nvmem) 613 return ERR_PTR(-ENOMEM); 614 615 rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL); 616 if (rval < 0) { 617 kfree(nvmem); 618 return ERR_PTR(rval); 619 } 620 621 kref_init(&nvmem->refcnt); 622 INIT_LIST_HEAD(&nvmem->cells); 623 624 nvmem->id = rval; 625 nvmem->owner = config->owner; 626 if (!nvmem->owner && config->dev->driver) 627 nvmem->owner = config->dev->driver->owner; 628 nvmem->stride = config->stride ?: 1; 629 nvmem->word_size = config->word_size ?: 1; 630 nvmem->size = config->size; 631 nvmem->dev.type = &nvmem_provider_type; 632 nvmem->dev.bus = &nvmem_bus_type; 633 nvmem->dev.parent = config->dev; 634 nvmem->priv = config->priv; 635 nvmem->type = config->type; 636 nvmem->reg_read = config->reg_read; 637 nvmem->reg_write = config->reg_write; 638 if (!config->no_of_node) 639 nvmem->dev.of_node = config->dev->of_node; 640 641 if (config->id == -1 && config->name) { 642 dev_set_name(&nvmem->dev, "%s", config->name); 643 } else { 644 dev_set_name(&nvmem->dev, "%s%d", 645 config->name ? : "nvmem", 646 config->name ? config->id : nvmem->id); 647 } 648 649 nvmem->read_only = device_property_present(config->dev, "read-only") | 650 config->read_only; 651 652 if (config->root_only) 653 nvmem->dev.groups = nvmem->read_only ? 654 nvmem_ro_root_dev_groups : 655 nvmem_rw_root_dev_groups; 656 else 657 nvmem->dev.groups = nvmem->read_only ? 
658 nvmem_ro_dev_groups : 659 nvmem_rw_dev_groups; 660 661 device_initialize(&nvmem->dev); 662 663 dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); 664 665 rval = device_add(&nvmem->dev); 666 if (rval) 667 goto err_put_device; 668 669 if (config->compat) { 670 rval = nvmem_setup_compat(nvmem, config); 671 if (rval) 672 goto err_device_del; 673 } 674 675 if (config->cells) { 676 rval = nvmem_add_cells(nvmem, config->cells, config->ncells); 677 if (rval) 678 goto err_teardown_compat; 679 } 680 681 rval = nvmem_add_cells_from_table(nvmem); 682 if (rval) 683 goto err_remove_cells; 684 685 rval = nvmem_add_cells_from_of(nvmem); 686 if (rval) 687 goto err_remove_cells; 688 689 rval = blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem); 690 if (rval) 691 goto err_remove_cells; 692 693 return nvmem; 694 695err_remove_cells: 696 nvmem_device_remove_all_cells(nvmem); 697err_teardown_compat: 698 if (config->compat) 699 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); 700err_device_del: 701 device_del(&nvmem->dev); 702err_put_device: 703 put_device(&nvmem->dev); 704 705 return ERR_PTR(rval); 706} 707EXPORT_SYMBOL_GPL(nvmem_register); 708 709static void nvmem_device_release(struct kref *kref) 710{ 711 struct nvmem_device *nvmem; 712 713 nvmem = container_of(kref, struct nvmem_device, refcnt); 714 715 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem); 716 717 if (nvmem->flags & FLAG_COMPAT) 718 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); 719 720 nvmem_device_remove_all_cells(nvmem); 721 device_del(&nvmem->dev); 722 put_device(&nvmem->dev); 723} 724 725/** 726 * nvmem_unregister() - Unregister previously registered nvmem device 727 * 728 * @nvmem: Pointer to previously registered nvmem device. 
729 */ 730void nvmem_unregister(struct nvmem_device *nvmem) 731{ 732 kref_put(&nvmem->refcnt, nvmem_device_release); 733} 734EXPORT_SYMBOL_GPL(nvmem_unregister); 735 736static void devm_nvmem_release(struct device *dev, void *res) 737{ 738 nvmem_unregister(*(struct nvmem_device **)res); 739} 740 741/** 742 * devm_nvmem_register() - Register a managed nvmem device for given 743 * nvmem_config. 744 * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem 745 * 746 * @dev: Device that uses the nvmem device. 747 * @config: nvmem device configuration with which nvmem device is created. 748 * 749 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device 750 * on success. 751 */ 752struct nvmem_device *devm_nvmem_register(struct device *dev, 753 const struct nvmem_config *config) 754{ 755 struct nvmem_device **ptr, *nvmem; 756 757 ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL); 758 if (!ptr) 759 return ERR_PTR(-ENOMEM); 760 761 nvmem = nvmem_register(config); 762 763 if (!IS_ERR(nvmem)) { 764 *ptr = nvmem; 765 devres_add(dev, ptr); 766 } else { 767 devres_free(ptr); 768 } 769 770 return nvmem; 771} 772EXPORT_SYMBOL_GPL(devm_nvmem_register); 773 774static int devm_nvmem_match(struct device *dev, void *res, void *data) 775{ 776 struct nvmem_device **r = res; 777 778 return *r == data; 779} 780 781/** 782 * devm_nvmem_unregister() - Unregister previously registered managed nvmem 783 * device. 784 * 785 * @dev: Device that uses the nvmem device. 786 * @nvmem: Pointer to previously registered nvmem device. 787 * 788 * Return: Will be an negative on error or a zero on success. 
789 */ 790int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) 791{ 792 return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem); 793} 794EXPORT_SYMBOL(devm_nvmem_unregister); 795 796static struct nvmem_device *__nvmem_device_get(struct device_node *np, 797 const char *nvmem_name) 798{ 799 struct nvmem_device *nvmem = NULL; 800 801 mutex_lock(&nvmem_mutex); 802 nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name); 803 mutex_unlock(&nvmem_mutex); 804 if (!nvmem) 805 return ERR_PTR(-EPROBE_DEFER); 806 807 if (!try_module_get(nvmem->owner)) { 808 dev_err(&nvmem->dev, 809 "could not increase module refcount for cell %s\n", 810 nvmem_dev_name(nvmem)); 811 812 return ERR_PTR(-EINVAL); 813 } 814 815 kref_get(&nvmem->refcnt); 816 817 return nvmem; 818} 819 820static void __nvmem_device_put(struct nvmem_device *nvmem) 821{ 822 module_put(nvmem->owner); 823 kref_put(&nvmem->refcnt, nvmem_device_release); 824} 825 826#if IS_ENABLED(CONFIG_OF) 827/** 828 * of_nvmem_device_get() - Get nvmem device from a given id 829 * 830 * @np: Device tree node that uses the nvmem device. 831 * @id: nvmem name from nvmem-names property. 832 * 833 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 834 * on success. 835 */ 836struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id) 837{ 838 839 struct device_node *nvmem_np; 840 int index; 841 842 index = of_property_match_string(np, "nvmem-names", id); 843 844 nvmem_np = of_parse_phandle(np, "nvmem", index); 845 if (!nvmem_np) 846 return ERR_PTR(-EINVAL); 847 848 return __nvmem_device_get(nvmem_np, NULL); 849} 850EXPORT_SYMBOL_GPL(of_nvmem_device_get); 851#endif 852 853/** 854 * nvmem_device_get() - Get nvmem device from a given id 855 * 856 * @dev: Device that uses the nvmem device. 857 * @dev_name: name of the requested nvmem device. 858 * 859 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device 860 * on success. 
861 */ 862struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name) 863{ 864 if (dev->of_node) { /* try dt first */ 865 struct nvmem_device *nvmem; 866 867 nvmem = of_nvmem_device_get(dev->of_node, dev_name); 868 869 if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER) 870 return nvmem; 871 872 } 873 874 return nvmem_find(dev_name); 875} 876EXPORT_SYMBOL_GPL(nvmem_device_get); 877 878static int devm_nvmem_device_match(struct device *dev, void *res, void *data) 879{ 880 struct nvmem_device **nvmem = res; 881 882 if (WARN_ON(!nvmem || !*nvmem)) 883 return 0; 884 885 return *nvmem == data; 886} 887 888static void devm_nvmem_device_release(struct device *dev, void *res) 889{ 890 nvmem_device_put(*(struct nvmem_device **)res); 891} 892 893/** 894 * devm_nvmem_device_put() - put alredy got nvmem device 895 * 896 * @dev: Device that uses the nvmem device. 897 * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(), 898 * that needs to be released. 899 */ 900void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem) 901{ 902 int ret; 903 904 ret = devres_release(dev, devm_nvmem_device_release, 905 devm_nvmem_device_match, nvmem); 906 907 WARN_ON(ret); 908} 909EXPORT_SYMBOL_GPL(devm_nvmem_device_put); 910 911/** 912 * nvmem_device_put() - put alredy got nvmem device 913 * 914 * @nvmem: pointer to nvmem device that needs to be released. 915 */ 916void nvmem_device_put(struct nvmem_device *nvmem) 917{ 918 __nvmem_device_put(nvmem); 919} 920EXPORT_SYMBOL_GPL(nvmem_device_put); 921 922/** 923 * devm_nvmem_device_get() - Get nvmem cell of device form a given id 924 * 925 * @dev: Device that requests the nvmem device. 926 * @id: name id for the requested nvmem device. 927 * 928 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell 929 * on success. The nvmem_cell will be freed by the automatically once the 930 * device is freed. 
931 */ 932struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id) 933{ 934 struct nvmem_device **ptr, *nvmem; 935 936 ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL); 937 if (!ptr) 938 return ERR_PTR(-ENOMEM); 939 940 nvmem = nvmem_device_get(dev, id); 941 if (!IS_ERR(nvmem)) { 942 *ptr = nvmem; 943 devres_add(dev, ptr); 944 } else { 945 devres_free(ptr); 946 } 947 948 return nvmem; 949} 950EXPORT_SYMBOL_GPL(devm_nvmem_device_get); 951 952static struct nvmem_cell * 953nvmem_cell_get_from_lookup(struct device *dev, const char *con_id) 954{ 955 struct nvmem_cell *cell = ERR_PTR(-ENOENT); 956 struct nvmem_cell_lookup *lookup; 957 struct nvmem_device *nvmem; 958 const char *dev_id; 959 960 if (!dev) 961 return ERR_PTR(-EINVAL); 962 963 dev_id = dev_name(dev); 964 965 mutex_lock(&nvmem_lookup_mutex); 966 967 list_for_each_entry(lookup, &nvmem_lookup_list, node) { 968 if ((strcmp(lookup->dev_id, dev_id) == 0) && 969 (strcmp(lookup->con_id, con_id) == 0)) { 970 /* This is the right entry. */ 971 nvmem = __nvmem_device_get(NULL, lookup->nvmem_name); 972 if (IS_ERR(nvmem)) { 973 /* Provider may not be registered yet. 
*/ 974 cell = ERR_CAST(nvmem); 975 goto out; 976 } 977 978 cell = nvmem_find_cell_by_name(nvmem, 979 lookup->cell_name); 980 if (!cell) { 981 __nvmem_device_put(nvmem); 982 cell = ERR_PTR(-ENOENT); 983 goto out; 984 } 985 } 986 } 987 988out: 989 mutex_unlock(&nvmem_lookup_mutex); 990 return cell; 991} 992 993#if IS_ENABLED(CONFIG_OF) 994static struct nvmem_cell * 995nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np) 996{ 997 struct nvmem_cell *cell = NULL; 998 999 mutex_lock(&nvmem_mutex); 1000 list_for_each_entry(cell, &nvmem->cells, node) { 1001 if (np == cell->np) 1002 break; 1003 } 1004 mutex_unlock(&nvmem_mutex); 1005 1006 return cell; 1007} 1008 1009/** 1010 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id 1011 * 1012 * @np: Device tree node that uses the nvmem cell. 1013 * @id: nvmem cell name from nvmem-cell-names property, or NULL 1014 * for the cell at index 0 (the lone cell with no accompanying 1015 * nvmem-cell-names property). 1016 * 1017 * Return: Will be an ERR_PTR() on error or a valid pointer 1018 * to a struct nvmem_cell. The nvmem_cell will be freed by the 1019 * nvmem_cell_put(). 
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-EINVAL);

	/*
	 * of_get_next_parent() consumes the reference held on cell_np and
	 * returns the parent (the provider node) with its own reference,
	 * so only nvmem_np needs to be put below.  cell_np is used for
	 * pointer comparison only after this point.
	 */
	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		/* drop the device reference taken above */
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device form a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

/* devres destructor: release the cell taken by devm_nvmem_cell_get() */
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device form a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

/* devres match callback: true when @res holds the cell in @data */
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	/* the cell itself lives on nvmem->cells; only the device ref drops */
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

/*
 * Shift a raw read of a bit-field cell right by cell->bit_offset, in
 * place, and mask off the bits above cell->nbits so the caller sees the
 * field value aligned to bit 0 of buf.
 */
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}

		/* result fits in less bytes */
		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
			*p-- = 0;
	}
	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
}

/*
 * Read cell->bytes from the device into @buf, then normalise bit-field
 * cells (see above).  On success *len, when non-NULL, receives the byte
 * count.
 */
static int __nvmem_cell_read(struct nvmem_device *nvmem,
		      struct nvmem_cell *cell,
		      void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	/* Caller owns the returned buffer and must kfree() it. */
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

/*
 * Build a cell->bytes sized scratch buffer holding @_buf shifted up by the
 * cell's bit offset, with the surrounding bits at both edges preserved by
 * reading them back from the device (read-modify-write of the partial first
 * and last bytes).  Returns the kzalloc'd buffer (caller frees) or ERR_PTR.
 */
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	/* p trails b by one byte as the shift walks up the buffer */
	p = b = buf;

	if (bit_offset) {
		/* pbyte carries the pre-shift value of the previous byte */
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/*
			 * Get last byte bits and shift them towards lsb.
			 * NOTE(review): shift count is BITS_PER_BYTE - 1 -
			 * bit_offset, one less than the read path's
			 * BITS_PER_BYTE - bit_offset — verify for multi-byte
			 * cells with a nonzero bit offset.
			 */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;

	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	/* byte-aligned cells must be written whole; bit cells are re-packed */
	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		/* buf now points at a temporary owned by this function */
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

/**
 * nvmem_cell_read_u32() - Read a cell value as an u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
1341 */ 1342int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val) 1343{ 1344 struct nvmem_cell *cell; 1345 void *buf; 1346 size_t len; 1347 1348 cell = nvmem_cell_get(dev, cell_id); 1349 if (IS_ERR(cell)) 1350 return PTR_ERR(cell); 1351 1352 buf = nvmem_cell_read(cell, &len); 1353 if (IS_ERR(buf)) { 1354 nvmem_cell_put(cell); 1355 return PTR_ERR(buf); 1356 } 1357 if (len != sizeof(*val)) { 1358 kfree(buf); 1359 nvmem_cell_put(cell); 1360 return -EINVAL; 1361 } 1362 memcpy(val, buf, sizeof(*val)); 1363 1364 kfree(buf); 1365 nvmem_cell_put(cell); 1366 return 0; 1367} 1368EXPORT_SYMBOL_GPL(nvmem_cell_read_u32); 1369 1370/** 1371 * nvmem_device_cell_read() - Read a given nvmem device and cell 1372 * 1373 * @nvmem: nvmem device to read from. 1374 * @info: nvmem cell info to be read. 1375 * @buf: buffer pointer which will be populated on successful read. 1376 * 1377 * Return: length of successful bytes read on success and negative 1378 * error code on error. 1379 */ 1380ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, 1381 struct nvmem_cell_info *info, void *buf) 1382{ 1383 struct nvmem_cell cell; 1384 int rc; 1385 ssize_t len; 1386 1387 if (!nvmem) 1388 return -EINVAL; 1389 1390 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); 1391 if (rc) 1392 return rc; 1393 1394 rc = __nvmem_cell_read(nvmem, &cell, buf, &len); 1395 if (rc) 1396 return rc; 1397 1398 return len; 1399} 1400EXPORT_SYMBOL_GPL(nvmem_device_cell_read); 1401 1402/** 1403 * nvmem_device_cell_write() - Write cell to a given nvmem device 1404 * 1405 * @nvmem: nvmem device to be written to. 1406 * @info: nvmem cell info to be written. 1407 * @buf: buffer to be written to cell. 1408 * 1409 * Return: length of bytes written or negative error code on failure. 
1410 */ 1411int nvmem_device_cell_write(struct nvmem_device *nvmem, 1412 struct nvmem_cell_info *info, void *buf) 1413{ 1414 struct nvmem_cell cell; 1415 int rc; 1416 1417 if (!nvmem) 1418 return -EINVAL; 1419 1420 rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); 1421 if (rc) 1422 return rc; 1423 1424 return nvmem_cell_write(&cell, buf, cell.bytes); 1425} 1426EXPORT_SYMBOL_GPL(nvmem_device_cell_write); 1427 1428/** 1429 * nvmem_device_read() - Read from a given nvmem device 1430 * 1431 * @nvmem: nvmem device to read from. 1432 * @offset: offset in nvmem device. 1433 * @bytes: number of bytes to read. 1434 * @buf: buffer pointer which will be populated on successful read. 1435 * 1436 * Return: length of successful bytes read on success and negative 1437 * error code on error. 1438 */ 1439int nvmem_device_read(struct nvmem_device *nvmem, 1440 unsigned int offset, 1441 size_t bytes, void *buf) 1442{ 1443 int rc; 1444 1445 if (!nvmem) 1446 return -EINVAL; 1447 1448 rc = nvmem_reg_read(nvmem, offset, buf, bytes); 1449 1450 if (rc) 1451 return rc; 1452 1453 return bytes; 1454} 1455EXPORT_SYMBOL_GPL(nvmem_device_read); 1456 1457/** 1458 * nvmem_device_write() - Write cell to a given nvmem device 1459 * 1460 * @nvmem: nvmem device to be written to. 1461 * @offset: offset in nvmem device. 1462 * @bytes: number of bytes to write. 1463 * @buf: buffer to be written. 1464 * 1465 * Return: length of bytes written or negative error code on failure. 
1466 */ 1467int nvmem_device_write(struct nvmem_device *nvmem, 1468 unsigned int offset, 1469 size_t bytes, void *buf) 1470{ 1471 int rc; 1472 1473 if (!nvmem) 1474 return -EINVAL; 1475 1476 rc = nvmem_reg_write(nvmem, offset, buf, bytes); 1477 1478 if (rc) 1479 return rc; 1480 1481 1482 return bytes; 1483} 1484EXPORT_SYMBOL_GPL(nvmem_device_write); 1485 1486/** 1487 * nvmem_add_cell_table() - register a table of cell info entries 1488 * 1489 * @table: table of cell info entries 1490 */ 1491void nvmem_add_cell_table(struct nvmem_cell_table *table) 1492{ 1493 mutex_lock(&nvmem_cell_mutex); 1494 list_add_tail(&table->node, &nvmem_cell_tables); 1495 mutex_unlock(&nvmem_cell_mutex); 1496} 1497EXPORT_SYMBOL_GPL(nvmem_add_cell_table); 1498 1499/** 1500 * nvmem_del_cell_table() - remove a previously registered cell info table 1501 * 1502 * @table: table of cell info entries 1503 */ 1504void nvmem_del_cell_table(struct nvmem_cell_table *table) 1505{ 1506 mutex_lock(&nvmem_cell_mutex); 1507 list_del(&table->node); 1508 mutex_unlock(&nvmem_cell_mutex); 1509} 1510EXPORT_SYMBOL_GPL(nvmem_del_cell_table); 1511 1512/** 1513 * nvmem_add_cell_lookups() - register a list of cell lookup entries 1514 * 1515 * @entries: array of cell lookup entries 1516 * @nentries: number of cell lookup entries in the array 1517 */ 1518void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) 1519{ 1520 int i; 1521 1522 mutex_lock(&nvmem_lookup_mutex); 1523 for (i = 0; i < nentries; i++) 1524 list_add_tail(&entries[i].node, &nvmem_lookup_list); 1525 mutex_unlock(&nvmem_lookup_mutex); 1526} 1527EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups); 1528 1529/** 1530 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup 1531 * entries 1532 * 1533 * @entries: array of cell lookup entries 1534 * @nentries: number of cell lookup entries in the array 1535 */ 1536void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) 1537{ 1538 int i; 1539 1540 
mutex_lock(&nvmem_lookup_mutex); 1541 for (i = 0; i < nentries; i++) 1542 list_del(&entries[i].node); 1543 mutex_unlock(&nvmem_lookup_mutex); 1544} 1545EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups); 1546 1547/** 1548 * nvmem_dev_name() - Get the name of a given nvmem device. 1549 * 1550 * @nvmem: nvmem device. 1551 * 1552 * Return: name of the nvmem device. 1553 */ 1554const char *nvmem_dev_name(struct nvmem_device *nvmem) 1555{ 1556 return dev_name(&nvmem->dev); 1557} 1558EXPORT_SYMBOL_GPL(nvmem_dev_name); 1559 1560static int __init nvmem_init(void) 1561{ 1562 return bus_register(&nvmem_bus_type); 1563} 1564 1565static void __exit nvmem_exit(void) 1566{ 1567 bus_unregister(&nvmem_bus_type); 1568} 1569 1570subsys_initcall(nvmem_init); 1571module_exit(nvmem_exit); 1572 1573MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org"); 1574MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com"); 1575MODULE_DESCRIPTION("nvmem Driver Core"); 1576MODULE_LICENSE("GPL v2");