Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

regmap: Implementation for regmap_multi_reg_write

This is the implementation of regmap_multi_reg_write()

There is a new capability 'can_multi_write' that device drivers
must set in order to use this multi reg write mode.

This replaces the first definition, which just defined the API.

Signed-off-by: Anthony Olech <anthony.olech.opensource@diasemi.com>
Signed-off-by: Mark Brown <broonie@linaro.org>

authored by

Opensource [Anthony Olech] and committed by
Mark Brown
e894c3f4 1c18d2ca

+180 -18
+2
drivers/base/regmap/internal.h
··· 134 134 135 135 /* if set, converts bulk rw to single rw */ 136 136 bool use_single_rw; 137 + /* if set, the device supports multi write mode */ 138 + bool can_multi_write; 137 139 138 140 struct rb_root range_tree; 139 141 void *selector_work_buf; /* Scratch buffer used for selector */
+174 -18
drivers/base/regmap/regmap.c
··· 439 439 else 440 440 map->reg_stride = 1; 441 441 map->use_single_rw = config->use_single_rw; 442 + map->can_multi_write = config->can_multi_write; 442 443 map->dev = dev; 443 444 map->bus = bus; 444 445 map->bus_context = bus_context; ··· 1577 1576 } 1578 1577 EXPORT_SYMBOL_GPL(regmap_bulk_write); 1579 1578 1580 - static int _regmap_multi_reg_write(struct regmap *map, 1581 - const struct reg_default *regs, 1582 - int num_regs) 1579 + /* 1580 + * _regmap_raw_multi_reg_write() 1581 + * 1582 + * the (register,newvalue) pairs in regs have not been formatted, but 1583 + * they are all in the same page and have been changed to being page 1584 + * relative. The page register has been written if that was necessary. 1585 + */ 1586 + static int _regmap_raw_multi_reg_write(struct regmap *map, 1587 + const struct reg_default *regs, 1588 + size_t num_regs) 1583 1589 { 1584 - int i, ret; 1590 + int ret; 1591 + void *buf; 1592 + int i; 1593 + u8 *u8; 1594 + size_t val_bytes = map->format.val_bytes; 1595 + size_t reg_bytes = map->format.reg_bytes; 1596 + size_t pad_bytes = map->format.pad_bytes; 1597 + size_t pair_size = reg_bytes + pad_bytes + val_bytes; 1598 + size_t len = pair_size * num_regs; 1599 + 1600 + buf = kzalloc(len, GFP_KERNEL); 1601 + if (!buf) 1602 + return -ENOMEM; 1603 + 1604 + /* We have to linearise by hand. 
*/ 1605 + 1606 + u8 = buf; 1585 1607 1586 1608 for (i = 0; i < num_regs; i++) { 1587 - if (regs[i].reg % map->reg_stride) 1588 - return -EINVAL; 1589 - ret = _regmap_write(map, regs[i].reg, regs[i].def); 1590 - if (ret != 0) { 1591 - dev_err(map->dev, "Failed to write %x = %x: %d\n", 1592 - regs[i].reg, regs[i].def, ret); 1593 - return ret; 1609 + int reg = regs[i].reg; 1610 + int val = regs[i].def; 1611 + trace_regmap_hw_write_start(map->dev, reg, 1); 1612 + map->format.format_reg(u8, reg, map->reg_shift); 1613 + u8 += reg_bytes + pad_bytes; 1614 + map->format.format_val(u8, val, 0); 1615 + u8 += val_bytes; 1616 + } 1617 + u8 = buf; 1618 + *u8 |= map->write_flag_mask; 1619 + 1620 + ret = map->bus->write(map->bus_context, buf, len); 1621 + 1622 + kfree(buf); 1623 + 1624 + for (i = 0; i < num_regs; i++) { 1625 + int reg = regs[i].reg; 1626 + trace_regmap_hw_write_done(map->dev, reg, 1); 1627 + } 1628 + return ret; 1629 + } 1630 + 1631 + static unsigned int _regmap_register_page(struct regmap *map, 1632 + unsigned int reg, 1633 + struct regmap_range_node *range) 1634 + { 1635 + unsigned int win_page = (reg - range->range_min) / range->window_len; 1636 + 1637 + return win_page; 1638 + } 1639 + 1640 + static int _regmap_range_multi_paged_reg_write(struct regmap *map, 1641 + struct reg_default *regs, 1642 + size_t num_regs) 1643 + { 1644 + int ret; 1645 + int i, n; 1646 + struct reg_default *base; 1647 + unsigned int this_page; 1648 + /* 1649 + * the set of registers are not necessarily in order, but 1650 + * since the order of write must be preserved this algorithm 1651 + * chops the set each time the page changes 1652 + */ 1653 + base = regs; 1654 + for (i = 0, n = 0; i < num_regs; i++, n++) { 1655 + unsigned int reg = regs[i].reg; 1656 + struct regmap_range_node *range; 1657 + 1658 + range = _regmap_range_lookup(map, reg); 1659 + if (range) { 1660 + unsigned int win_page = _regmap_register_page(map, reg, 1661 + range); 1662 + 1663 + if (i == 0) 1664 + this_page = 
win_page; 1665 + if (win_page != this_page) { 1666 + this_page = win_page; 1667 + ret = _regmap_raw_multi_reg_write(map, base, n); 1668 + if (ret != 0) 1669 + return ret; 1670 + base += n; 1671 + n = 0; 1672 + } 1673 + ret = _regmap_select_page(map, &base[n].reg, range, 1); 1674 + if (ret != 0) 1675 + return ret; 1676 + } 1677 + } 1678 + if (n > 0) 1679 + return _regmap_raw_multi_reg_write(map, base, n); 1680 + return 0; 1681 + } 1682 + 1683 + static int _regmap_multi_reg_write(struct regmap *map, 1684 + const struct reg_default *regs, 1685 + size_t num_regs) 1686 + { 1687 + int i; 1688 + int ret; 1689 + 1690 + if (!map->can_multi_write) { 1691 + for (i = 0; i < num_regs; i++) { 1692 + ret = _regmap_write(map, regs[i].reg, regs[i].def); 1693 + if (ret != 0) 1694 + return ret; 1695 + } 1696 + return 0; 1697 + } 1698 + 1699 + if (!map->format.parse_inplace) 1700 + return -EINVAL; 1701 + 1702 + if (map->writeable_reg) 1703 + for (i = 0; i < num_regs; i++) { 1704 + int reg = regs[i].reg; 1705 + if (!map->writeable_reg(map->dev, reg)) 1706 + return -EINVAL; 1707 + if (reg % map->reg_stride) 1708 + return -EINVAL; 1709 + } 1710 + 1711 + if (!map->cache_bypass) { 1712 + for (i = 0; i < num_regs; i++) { 1713 + unsigned int val = regs[i].def; 1714 + unsigned int reg = regs[i].reg; 1715 + ret = regcache_write(map, reg, val); 1716 + if (ret) { 1717 + dev_err(map->dev, 1718 + "Error in caching of register: %x ret: %d\n", 1719 + reg, ret); 1720 + return ret; 1721 + } 1722 + } 1723 + if (map->cache_only) { 1724 + map->cache_dirty = true; 1725 + return 0; 1594 1726 } 1595 1727 } 1596 1728 1597 - return 0; 1729 + WARN_ON(!map->bus); 1730 + 1731 + for (i = 0; i < num_regs; i++) { 1732 + unsigned int reg = regs[i].reg; 1733 + struct regmap_range_node *range; 1734 + range = _regmap_range_lookup(map, reg); 1735 + if (range) { 1736 + size_t len = sizeof(struct reg_default)*num_regs; 1737 + struct reg_default *base = kmemdup(regs, len, 1738 + GFP_KERNEL); 1739 + if (!base) 1740 + return 
-ENOMEM; 1741 + ret = _regmap_range_multi_paged_reg_write(map, base, 1742 + num_regs); 1743 + kfree(base); 1744 + 1745 + return ret; 1746 + } 1747 + } 1748 + return _regmap_raw_multi_reg_write(map, regs, num_regs); 1598 1749 } 1599 1750 1600 1751 /* 1601 1752 * regmap_multi_reg_write(): Write multiple registers to the device 1602 1753 * 1603 - * where the set of register are supplied in any order 1754 + * where the set of register,value pairs are supplied in any order, 1755 + * possibly not all in a single range. 1604 1756 * 1605 1757 * @map: Register map to write to 1606 1758 * @regs: Array of structures containing register,value to be written 1607 1759 * @num_regs: Number of registers to write 1608 1760 * 1609 - * This function is intended to be used for writing a large block of data 1610 - * atomically to the device in single transfer for those I2C client devices 1611 - * that implement this alternative block write mode. 1761 + * The 'normal' block write mode will ultimately send data on the 1762 + * target bus as R,V1,V2,V3,..,Vn where successively higher registers are 1763 + * addressed. However, this alternative block multi write mode will send 1764 + * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device 1765 + * must of course support the mode. 1612 1766 * 1613 - * A value of zero will be returned on success, a negative errno will 1614 - * be returned in error cases. 1767 + * A value of zero will be returned on success, a negative errno will be 1768 + * returned in error cases. 1615 1769 */ 1616 1770 int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs, 1617 1771 int num_regs)
+4
include/linux/regmap.h
··· 164 164 * @use_single_rw: If set, converts the bulk read and write operations into 165 165 * a series of single read and write operations. This is useful 166 166 * for device that does not support bulk read and write. 167 + * @can_multi_write: If set, the device supports the multi write mode of bulk 168 + * write operations, if clear multi write requests will be 169 + * split into individual write operations 167 170 * 168 171 * @cache_type: The actual cache type. 169 172 * @reg_defaults_raw: Power on reset values for registers (for use with ··· 218 215 u8 write_flag_mask; 219 216 220 217 bool use_single_rw; 218 + bool can_multi_write; 221 219 222 220 enum regmap_endian reg_format_endian; 223 221 enum regmap_endian val_format_endian;