// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
 * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
 * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
 */

#define pr_fmt(fmt) "LOGIC PIO: " fmt

#include <linux/of.h>
#include <linux/io.h>
#include <linux/logic_pio.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/slab.h>

/* The unique hardware address list */
static LIST_HEAD(io_range_list);
static DEFINE_MUTEX(io_range_mutex);

/* Consider a kernel general helper for this */
#define in_range(b, first, len)	((b) >= (first) && (b) < (first) + (len))

/**
 * logic_pio_register_range - register logical PIO range for a host
 * @new_range: pointer to the IO range to be registered.
 *
 * Returns 0 on success, the error code in case of failure.
 *
 * Register a new IO range node in the IO range list.
 */
int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
{
	struct logic_pio_hwaddr *range;
	resource_size_t start;
	resource_size_t end;
	resource_size_t mmio_sz = 0;
	resource_size_t iio_sz = MMIO_UPPER_LIMIT;
	int ret = 0;

	if (!new_range || !new_range->fwnode || !new_range->size)
		return -EINVAL;

	start = new_range->hw_start;
	end = new_range->hw_start + new_range->size;

	mutex_lock(&io_range_mutex);
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (range->fwnode == new_range->fwnode) {
			/* range already there */
			goto end_register;
		}
		if (range->flags == LOGIC_PIO_CPU_MMIO &&
		    new_range->flags == LOGIC_PIO_CPU_MMIO) {
			/* for MMIO ranges we need to check for overlap */
			if (start >= range->hw_start + range->size ||
			    end < range->hw_start) {
				mmio_sz += range->size;
			} else {
				ret = -EFAULT;
				goto end_register;
			}
		} else if (range->flags == LOGIC_PIO_INDIRECT &&
			   new_range->flags == LOGIC_PIO_INDIRECT) {
			iio_sz += range->size;
		}
	}

	/* range not registered yet, check for available space */
	if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
		if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) {
			/* if it's too big check if 64K space can be reserved */
			if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
				ret = -E2BIG;
				goto end_register;
			}
			new_range->size = SZ_64K;
			pr_warn("Requested IO range too big, new size set to 64K\n");
		}
		new_range->io_start = mmio_sz;
	} else if (new_range->flags == LOGIC_PIO_INDIRECT) {
		if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
			ret = -E2BIG;
			goto end_register;
		}
		new_range->io_start = iio_sz;
	} else {
		/* invalid flag */
		ret = -EINVAL;
		goto end_register;
	}

	list_add_tail_rcu(&new_range->list, &io_range_list);

end_register:
	mutex_unlock(&io_range_mutex);
	return ret;
}

/**
 * find_io_range_by_fwnode - find logical PIO range for given FW node
 * @fwnode: FW node handle associated with logical PIO range
 *
 * Returns pointer to node on success, NULL otherwise.
 *
 * Traverse the io_range_list to find the registered node for @fwnode.
 */
struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
{
	struct logic_pio_hwaddr *range;

	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (range->fwnode == fwnode)
			return range;
	}
	return NULL;
}

/* Return a registered range given an input PIO token */
static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
{
	struct logic_pio_hwaddr *range;

	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (in_range(pio, range->io_start, range->size))
			return range;
	}
	pr_err("PIO entry token %lx invalid\n", pio);
	return NULL;
}

/**
 * logic_pio_to_hwaddr - translate logical PIO to HW address
 * @pio: logical PIO value
 *
 * Returns HW address if valid, ~0 otherwise.
 *
 * Translate the input logical PIO to the corresponding hardware address.
 * The input PIO should be unique in the whole logical PIO space.
 */
resource_size_t logic_pio_to_hwaddr(unsigned long pio)
{
	struct logic_pio_hwaddr *range;

	range = find_io_range(pio);
	if (range)
		return range->hw_start + pio - range->io_start;

	return (resource_size_t)~0;
}

/**
 * logic_pio_trans_hwaddr - translate HW address to logical PIO
 * @fwnode: FW node reference for the host
 * @addr: Host-relative HW address
 * @size: size to translate
 *
 * Returns Logical PIO value if successful, ~0UL otherwise
 */
unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
				     resource_size_t addr, resource_size_t size)
{
	struct logic_pio_hwaddr *range;

	range = find_io_range_by_fwnode(fwnode);
	if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
		pr_err("IO range not found or invalid\n");
		return ~0UL;
	}
	if (range->size < size) {
		pr_err("resource size %pa cannot fit in IO range size %pa\n",
		       &size, &range->size);
		return ~0UL;
	}
	return addr - range->hw_start + range->io_start;
}

unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
{
	struct logic_pio_hwaddr *range;

	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (range->flags != LOGIC_PIO_CPU_MMIO)
			continue;
		if (in_range(addr, range->hw_start, range->size))
			return addr - range->hw_start + range->io_start;
	}
	pr_err("addr %llx not registered in io_range_list\n",
	       (unsigned long long) addr);
	return ~0UL;
}

#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
#define BUILD_LOGIC_IO(bw, type)					\
type logic_in##bw(unsigned long addr)					\
{									\
	type ret = (type)~0;						\
									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		ret = read##bw(PCI_IOBASE + addr);			\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry && entry->ops)				\
			ret = entry->ops->in(entry->hostdata,		\
					addr, sizeof(type));		\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
	return ret;							\
}									\
									\
void logic_out##bw(type value, unsigned long addr)			\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		write##bw(value, PCI_IOBASE + addr);			\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry && entry->ops)				\
			entry->ops->out(entry->hostdata,		\
					addr, value, sizeof(type));	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
}									\
									\
void logic_ins##bw(unsigned long addr, void *buffer,			\
		   unsigned int count)					\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		reads##bw(PCI_IOBASE + addr, buffer, count);		\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry && entry->ops)				\
			entry->ops->ins(entry->hostdata,		\
				addr, buffer, sizeof(type), count);	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
									\
}									\
									\
void logic_outs##bw(unsigned long addr, const void *buffer,		\
		    unsigned int count)					\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		writes##bw(PCI_IOBASE + addr, buffer, count);		\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry && entry->ops)				\
			entry->ops->outs(entry->hostdata,		\
				addr, buffer, sizeof(type), count);	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
}

BUILD_LOGIC_IO(b, u8)
EXPORT_SYMBOL(logic_inb);
EXPORT_SYMBOL(logic_insb);
EXPORT_SYMBOL(logic_outb);
EXPORT_SYMBOL(logic_outsb);

BUILD_LOGIC_IO(w, u16)
EXPORT_SYMBOL(logic_inw);
EXPORT_SYMBOL(logic_insw);
EXPORT_SYMBOL(logic_outw);
EXPORT_SYMBOL(logic_outsw);

BUILD_LOGIC_IO(l, u32)
EXPORT_SYMBOL(logic_inl);
EXPORT_SYMBOL(logic_insl);
EXPORT_SYMBOL(logic_outl);
EXPORT_SYMBOL(logic_outsl);

#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */
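/*
 * Illustrative sketch (appended to this view, not part of the upstream file):
 * how a host controller driver might register a LOGIC_PIO_INDIRECT range with
 * logic_pio_register_range() and hook up accessor callbacks. All foo_*
 * identifiers are hypothetical; the callback signatures are assumed to follow
 * struct logic_pio_host_ops in <linux/logic_pio.h>, as invoked by the
 * BUILD_LOGIC_IO() accessors above. Guarded out so it is never compiled.
 */
#if 0	/* example only; assumes <linux/platform_device.h>, <linux/property.h> */
static u32 foo_host_in(void *hostdata, unsigned long addr, size_t dwidth)
{
	/* Read @dwidth bytes from the device behind @hostdata at bus address @addr. */
	return 0;
}

static void foo_host_out(void *hostdata, unsigned long addr, u32 val,
			 size_t dwidth)
{
	/* Write @val (@dwidth bytes wide) to the device at bus address @addr. */
}

static const struct logic_pio_host_ops foo_host_ops = {
	.in	= foo_host_in,
	.out	= foo_host_out,
	/* .ins/.outs would be added for string (repeated) accessors */
};

static int foo_host_probe(struct platform_device *pdev)
{
	struct logic_pio_hwaddr *range;
	int ret;

	range = devm_kzalloc(&pdev->dev, sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;

	range->fwnode	= dev_fwnode(&pdev->dev);
	range->flags	= LOGIC_PIO_INDIRECT;	/* host-mediated, not CPU MMIO */
	range->hw_start	= 0;			/* bus-local addresses start at 0 */
	range->size	= SZ_4K;		/* size of the bus IO window */
	range->hostdata	= pdev;
	range->ops	= &foo_host_ops;

	ret = logic_pio_register_range(range);
	if (ret)
		return ret;

	/*
	 * range->io_start now holds the logical PIO base assigned above
	 * MMIO_UPPER_LIMIT; inb()/outb() on tokens inside
	 * [io_start, io_start + size) reach foo_host_ops via
	 * logic_inb()/logic_outb().
	 */
	return 0;
}
#endif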