// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 *  Dave Engebretsen engebret@us.ibm.com
 *  Santiago Leon santil@us.ibm.com
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *  Stephen Rothwell
 *  Robert Jennings <rcjenn@us.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>

static struct vio_dev vio_bus_device = { /* fake "parent" device */
	.name = "vio",
	.type = "",
	.dev.init_name = "vio",
	.dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
	size_t size;
	size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072

/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
	struct vio_dev *viodev;
	struct list_head list;
};

/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
static struct vio_cmo {
	spinlock_t lock;
	struct delayed_work balance_q;
	struct list_head device_list;
	size_t entitled;
	struct vio_cmo_pool reserve;
	struct vio_cmo_pool excess;
	size_t spare;
	size_t min;
	size_t desired;
	size_t curr;
	size_t high;
} vio_cmo;

/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
	struct device_node *node_vroot;
	int count = 0;

	/*
	 * Count the number of vdevice entries with an
	 * ibm,my-dma-window OF property
	 */
	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;
		struct property *prop;

		for_each_child_of_node(node_vroot, of_node) {
			prop = of_find_property(of_node, "ibm,my-dma-window",
						NULL);
			if (prop)
				count++;
		}
	}
	of_node_put(node_vroot);
	return count;
}

/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * made available.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Determine the amount of free entitlement available in reserve */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* If spare is not fulfilled, the excess pool cannot be used. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	/* The request can be satisfied */
	if ((reserve_free + excess_free) >= size) {
		vio_cmo.curr += size;
		if (vio_cmo.curr > vio_cmo.high)
			vio_cmo.high = vio_cmo.curr;
		viodev->cmo.allocated += size;
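		/*
		 * Charge the device's own reserve entitlement first;
		 * whatever is left over is drawn from the shared excess
		 * pool.
		 */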
		size -= min(reserve_free, size);
		vio_cmo.excess.free -= size;
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}

/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and any remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t spare_needed = 0;
	size_t excess_freed = 0;
	size_t reserve_freed = size;
	size_t tmp;
	int balance = 0;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.curr -= size;

	/* Amount of memory freed from the excess pool */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
						   viodev->cmo.entitled));
		reserve_freed -= excess_freed;
	}

	/* Remove allocation from device */
	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Spare is a subset of the reserve pool, replenish it first. */
	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

	/*
	 * Replenish the spare in the reserve pool from the excess pool.
	 * This moves entitlement into the reserve pool.
	 */
	if (spare_needed && excess_freed) {
		tmp = min(excess_freed, spare_needed);
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		vio_cmo.spare += tmp;
		excess_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Replenish the spare in the reserve pool from the reserve pool.
	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
	 * if needed, and gives it to the spare pool. The amount of used
	 * memory in this pool does not change.
	 */
	if (spare_needed && reserve_freed) {
		tmp = min3(spare_needed, reserve_freed,
			   (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Increase the reserve pool until the desired allocation is met.
	 * Move an allocation freed from the excess pool into the reserve
	 * pool and schedule a balance operation.
	 */
	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
		tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		excess_freed -= tmp;
		balance = 1;
	}

	/* Return memory from the excess pool to that pool */
	if (excess_freed)
		vio_cmo.excess.free += excess_freed;

	if (balance)
		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when the change cannot be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail, delta, tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Entitlement increases */
	if (new_entitlement > vio_cmo.entitled) {
		delta = new_entitlement - vio_cmo.entitled;

		/* Fulfill spare allocation */
		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
			vio_cmo.spare += tmp;
			vio_cmo.reserve.size += tmp;
			delta -= tmp;
		}

		/* Remaining new allocation goes to the excess pool */
		vio_cmo.entitled += delta;
		vio_cmo.excess.size += delta;
		vio_cmo.excess.free += delta;

		goto out;
	}

	/* Entitlement decreases */
	delta = vio_cmo.entitled - new_entitlement;
	avail = vio_cmo.excess.free;

	/*
	 * Need to check how much unused entitlement each device can
	 * sacrifice to fulfill entitlement change.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		if (avail >= delta)
			break;

		viodev = dev_ent->viodev;
		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
			avail += viodev->cmo.entitled -
				 max_t(size_t, viodev->cmo.allocated,
				       VIO_CMO_MIN_ENT);
	}

	if (delta <= avail) {
		vio_cmo.entitled -= delta;

		/* Take entitlement from the excess pool first */
		tmp = min(vio_cmo.excess.free, delta);
		vio_cmo.excess.size -= tmp;
		vio_cmo.excess.free -= tmp;
		delta -= tmp;

		/*
		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
		 * until entitlement change is served
		 */
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			if (!delta)
				break;

			viodev = dev_ent->viodev;
			tmp = 0;
			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
				tmp = viodev->cmo.entitled -
				      max_t(size_t, viodev->cmo.allocated,
					    VIO_CMO_MIN_ENT);
			viodev->cmo.entitled -= min(tmp, delta);
			delta -= min(tmp, delta);
		}
	} else {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return -ENOMEM;
	}

out:
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver.  The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
	struct vio_cmo *cmo;
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail = 0, level, chunk, need;
	int devcount = 0, fulfilled;

	cmo = container_of(work, struct vio_cmo, balance_q.work);

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Calculate minimum entitlement and fulfill spare */
	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
	BUG_ON(cmo->min > cmo->entitled);
	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
	cmo->min += cmo->spare;
	cmo->desired = cmo->min;

	/*
	 * Determine how much entitlement is available and reset device
	 * entitlements
	 */
	avail = cmo->entitled - cmo->spare;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		devcount++;
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
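		/*
		 * Memory a device has already consumed above the minimum
		 * cannot be redistributed, so count the larger of its
		 * allocation and the minimum against what is available.
		 */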
		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/*
	 * Having provided each device with the minimum entitlement, loop
	 * over the devices portioning out the remaining entitlement
	 * until there is nothing left.
	 */
	level = VIO_CMO_MIN_ENT;
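	/*
	 * level acts as the per-device entitlement ceiling for a pass of
	 * the loop below; it rises by one chunk on every iteration.
	 */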
	while (avail) {
		fulfilled = 0;
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			viodev = dev_ent->viodev;

			if (viodev->cmo.desired <= level) {
				fulfilled++;
				continue;
			}

			/*
			 * Give the device up to VIO_CMO_BALANCE_CHUNK
			 * bytes of entitlement, but do not exceed the
			 * desired level of entitlement for the device.
			 */
			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
			chunk = min(chunk, (viodev->cmo.desired -
					    viodev->cmo.entitled));
			viodev->cmo.entitled += chunk;

			/*
			 * If the memory for this entitlement increase was
			 * already allocated to the device it does not come
			 * from the available pool being portioned out.
			 */
			need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
			       max(viodev->cmo.allocated, level);
			avail -= need;
		}
		if (fulfilled == devcount)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}

	/* Calculate new reserve and excess pool sizes */
	cmo->reserve.size = cmo->min;
	cmo->excess.free = 0;
	cmo->excess.size = 0;
	need = 0;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		/* Calculated reserve size above the minimum entitlement */
		if (viodev->cmo.entitled)
			cmo->reserve.size += (viodev->cmo.entitled -
					      VIO_CMO_MIN_ENT);
		/* Calculated used excess entitlement */
		if (viodev->cmo.allocated > viodev->cmo.entitled)
			need += viodev->cmo.allocated - viodev->cmo.entitled;
	}
	cmo->excess.size = cmo->entitled - cmo->reserve.size;
	cmo->excess.free = cmo->excess.size - need;

	cancel_delayed_work(to_delayed_work(work));
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flag,
					  unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	void *ret;

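	/* CMO entitlement for coherent buffers is accounted in whole pages. */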
	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return NULL;
	}

	ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				   dma_handle, dev->coherent_dma_mask, flag,
				   dev_to_node(dev));
	if (unlikely(ret == NULL)) {
		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle,
					unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	dma_addr_t ret = DMA_MAPPING_ERROR;

	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
		goto out_fail;
	ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
			     direction, attrs);
	if (unlikely(ret == DMA_MAPPING_ERROR))
		goto out_deallocate;
	return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);
	return DMA_MAPPING_ERROR;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);

	iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
				int nelems, enum dma_data_direction direction,
				unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	struct scatterlist *sgl;
	int ret, count;
	size_t alloc_size = 0;

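	/*
	 * TCEs are allocated in IOMMU-page units; account for each entry
	 * rounded up to that granularity before mapping.
	 */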
	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));

	if (vio_cmo_alloc(viodev, alloc_size))
		goto out_fail;
	ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
			       direction, attrs);
	if (unlikely(!ret))
		goto out_deallocate;

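	/*
	 * Entries may have been coalesced by the IOMMU; refund whatever
	 * entitlement the mapped scatterlist did not actually use.
	 */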
	for_each_sg(sglist, sgl, ret, count)
		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);
	return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, alloc_size);
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);
	return 0;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	struct scatterlist *sgl;
	size_t alloc_size = 0;
	int count;

	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));

	ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
	vio_cmo_dealloc(viodev, alloc_size);
}

static const struct dma_map_ops vio_dma_mapping_ops = {
	.alloc = vio_dma_iommu_alloc_coherent,
	.free = vio_dma_iommu_free_coherent,
	.map_sg = vio_dma_iommu_map_sg,
	.unmap_sg = vio_dma_iommu_unmap_sg,
	.map_page = vio_dma_iommu_map_page,
	.unmap_page = vio_dma_iommu_unmap_page,
	.dma_supported = dma_iommu_dma_supported,
	.get_required_mask = dma_iommu_get_required_mask,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
};

/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
	unsigned long flags;
	struct vio_cmo_dev_entry *dev_ent;
	int found = 0;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/*
	 * Changes will not be made for devices not in the device list.
	 * If it is not in the device list, then no driver is loaded
	 * for the device and it cannot receive entitlement.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			found = 1;
			break;
		}
	if (!found) {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return;
	}

	/* Increase/decrease in desired device entitlement */
	if (desired >= viodev->cmo.desired) {
		/* Just bump the bus and device values prior to a balance */
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
	} else {
		/* Decrease bus and device values for desired entitlement */
		vio_cmo.desired -= viodev->cmo.desired - desired;
		viodev->cmo.desired = desired;
		/*
		 * If less entitlement is desired than current entitlement,
		 * move any reserve memory in the change region to the
		 * excess pool.
		 */
		if (viodev->cmo.entitled > desired) {
			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
			vio_cmo.excess.size += viodev->cmo.entitled - desired;
			/*
			 * If entitlement moving from the reserve pool to the
			 * excess pool is currently unused, add to the excess
			 * free counter.
			 */
			if (viodev->cmo.allocated < viodev->cmo.entitled)
				vio_cmo.excess.free += viodev->cmo.entitled -
					max(viodev->cmo.allocated, desired);
			viodev->cmo.entitled = desired;
		}
	}
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for the device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	struct device *dev = &viodev->dev;
	struct iommu_table *tbl;
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	unsigned long flags;
	size_t size;
	bool dma_capable = false;

	tbl = get_iommu_table_base(dev);

	/* A device requires entitlement if it has a DMA window property */
	switch (viodev->family) {
	case VDEVICE:
		if (of_get_property(viodev->dev.of_node,
				    "ibm,my-dma-window", NULL))
			dma_capable = true;
		break;
	case PFO:
		dma_capable = false;
		break;
	default:
		dev_warn(dev, "unknown device family: %d\n", viodev->family);
		BUG();
		break;
	}

	/* Configure entitlement for the device. */
	if (dma_capable) {
		/* Check that the driver is CMO enabled and get desired DMA */
		if (!viodrv->get_desired_dma) {
			dev_err(dev, "%s: device driver does not support CMO\n",
				__func__);
			return -EINVAL;
		}

		viodev->cmo.desired =
			IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
			viodev->cmo.desired = VIO_CMO_MIN_ENT;
		size = VIO_CMO_MIN_ENT;

		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
				  GFP_KERNEL);
		if (!dev_ent)
			return -ENOMEM;

		dev_ent->viodev = viodev;
		spin_lock_irqsave(&vio_cmo.lock, flags);
		list_add(&dev_ent->list, &vio_cmo.device_list);
	} else {
		viodev->cmo.desired = 0;
		size = 0;
		spin_lock_irqsave(&vio_cmo.lock, flags);
	}

	/*
	 * If the needs for vio_cmo.min have not changed since they
	 * were last set, the number of devices in the OF tree has
	 * been constant and the IO memory for this is already in
	 * the reserve pool.
	 */
	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
			    VIO_CMO_MIN_ENT)) {
		/* Update desired entitlement if device requires it */
		if (size)
			vio_cmo.desired += (viodev->cmo.desired -
					    VIO_CMO_MIN_ENT);
	} else {
		size_t tmp;

		tmp = vio_cmo.spare + vio_cmo.excess.free;
		if (tmp < size) {
			dev_err(dev, "%s: insufficient free "
				"entitlement to add device. "
				"Need %lu, have %lu\n", __func__,
				size, (vio_cmo.spare + tmp));
			spin_unlock_irqrestore(&vio_cmo.lock, flags);
			return -ENOMEM;
		}

		/* Use excess pool first to fulfill request */
		tmp = min(size, vio_cmo.excess.free);
		vio_cmo.excess.free -= tmp;
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;

		/* Use spare if excess pool was insufficient */
		vio_cmo.spare -= size - tmp;

		/* Update bus accounting */
		vio_cmo.min += size;
		vio_cmo.desired += viodev->cmo.desired;
	}
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (viodev->cmo.allocated) {
		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
			"allocated after remove operation.\n",
			__func__, viodev->cmo.allocated);
		BUG();
	}

	/*
	 * Remove the device from the device list being maintained for
	 * CMO enabled devices.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			list_del(&dev_ent->list);
			kfree(dev_ent);
			break;
		}

	/*
	 * Devices may not require any entitlement and they do not need
	 * to be processed.  Otherwise, return the device's entitlement
	 * back to the pools.
	 */
	if (viodev->cmo.entitled) {
		/*
		 * This device has not yet left the OF tree, its
		 * minimum entitlement remains in vio_cmo.min and
		 * vio_cmo.desired
		 */
		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

		/*
		 * Save min allocation for device in reserve as long
		 * as it exists in OF tree as determined by later
		 * balance operation
		 */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Replenish spare from freed reserve pool */
		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
							 vio_cmo.spare));
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Remaining reserve goes to excess pool */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;

		/*
		 * Until the device is removed it will keep a
		 * minimum entitlement; this will guarantee that
		 * a module unload/load will succeed.
		 */
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
		atomic_set(&viodev->cmo.allocs_failed, 0);
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}

/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
	struct hvcall_mpp_data mpp_data;
	int err;

	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
	spin_lock_init(&vio_cmo.lock);
	INIT_LIST_HEAD(&vio_cmo.device_list);
	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

	/* Get current system entitlement */
	err = h_get_mpp(&mpp_data);

	/*
	 * On failure, continue with entitlement set to 0, will panic()
	 * later when spare is reserved.
	 */
	if (err != H_SUCCESS) {
		printk(KERN_ERR "%s: unable to determine system IO "
		       "entitlement. (%d)\n", __func__, err);
		vio_cmo.entitled = 0;
	} else {
		vio_cmo.entitled = mpp_data.entitled_mem;
	}

	/* Set reservation and check against entitlement */
	vio_cmo.spare = VIO_CMO_MIN_ENT;
	vio_cmo.reserve.size = vio_cmo.spare;
	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
				 VIO_CMO_MIN_ENT);
	if (vio_cmo.reserve.size > vio_cmo.entitled) {
		printk(KERN_ERR "%s: insufficient system entitlement\n",
		       __func__);
		panic("%s: Insufficient system entitlement", __func__);
	}

	/* Set the remaining accounting variables */
	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
	vio_cmo.excess.free = vio_cmo.excess.size;
	vio_cmo.min = vio_cmo.reserve.size;
	vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device functions and data structures for CMO */

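/* Generate a sysfs "show" routine for a per-device CMO counter. */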
#define viodev_cmo_rd_attr(name)                                 \
static ssize_t cmo_##name##_show(struct device *dev,             \
				 struct device_attribute *attr,   \
				 char *buf)                       \
{                                                                \
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
}

static ssize_t cmo_allocs_failed_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t cmo_allocs_failed_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	atomic_set(&viodev->cmo.allocs_failed, 0);
	return count;
}

static ssize_t cmo_desired_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	size_t new_desired;
	int ret;

	ret = kstrtoul(buf, 10, &new_desired);
	if (ret)
		return ret;

	vio_cmo_set_dev_desired(viodev, new_desired);
	return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf);

static struct device_attribute dev_attr_name;
static struct device_attribute dev_attr_devspec;
static struct device_attribute dev_attr_modalias;

static DEVICE_ATTR_RO(cmo_entitled);
static DEVICE_ATTR_RO(cmo_allocated);
static DEVICE_ATTR_RW(cmo_desired);
static DEVICE_ATTR_RW(cmo_allocs_failed);

static struct attribute *vio_cmo_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	&dev_attr_cmo_entitled.attr,
	&dev_attr_cmo_allocated.attr,
	&dev_attr_cmo_desired.attr,
	&dev_attr_cmo_allocs_failed.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_cmo_dev);

/* sysfs bus functions and data structures for CMO */

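/*
 * These macros generate bus-level sysfs "show" routines for the global
 * vio_cmo counters and for the reserve/excess pool fields.
 */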
#define viobus_cmo_rd_attr(name)                                      \
static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf) \
{                                                                     \
	return sprintf(buf, "%lu\n", vio_cmo.name);                   \
}                                                                     \
static struct bus_attribute bus_attr_cmo_bus_##name =                 \
	__ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)

#define viobus_cmo_pool_rd_attr(name, var)                            \
static ssize_t                                                        \
cmo_##name##_##var##_show(struct bus_type *bt, char *buf)             \
{                                                                     \
	return sprintf(buf, "%lu\n", vio_cmo.name.var);               \
}                                                                     \
static BUS_ATTR_RO(cmo_##name##_##var)

viobus_cmo_rd_attr(entitled);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);

static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
{
	return sprintf(buf, "%lu\n", vio_cmo.high);
}

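/* Writing anything to cmo_high resets the high-water mark to current usage. */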
static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
			      size_t count)
{
	unsigned long flags;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.high = vio_cmo.curr;
	spin_unlock_irqrestore(&vio_cmo.lock, flags);

	return count;
}
static BUS_ATTR_RW(cmo_high);

static struct attribute *vio_bus_attrs[] = {
	&bus_attr_cmo_bus_entitled.attr,
	&bus_attr_cmo_bus_spare.attr,
	&bus_attr_cmo_bus_min.attr,
	&bus_attr_cmo_bus_desired.attr,
	&bus_attr_cmo_bus_curr.attr,
	&bus_attr_cmo_high.attr,
	&bus_attr_cmo_reserve_size.attr,
	&bus_attr_cmo_excess_size.attr,
	&bus_attr_cmo_excess_free.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_bus);

static void vio_cmo_sysfs_init(void)
{
	vio_bus_type.dev_groups = vio_cmo_dev_groups;
	vio_bus_type.bus_groups = vio_bus_groups;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);

/*
 * Platform Facilities Option (PFO) support
 */

/**
 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
 *
 * @vdev: Pointer to a struct vio_dev for device
 * @op: Pointer to a struct vio_pfo_op for the operation parameters
 *
 * Calls the hypervisor to synchronously perform the PFO operation
 * described in @op.  In the case of a busy response from the hypervisor,
 * the operation will be re-submitted indefinitely unless a non-zero timeout
 * is specified or an error occurs.  The timeout places a limit on when to
 * stop re-submitting an operation; the total time can be exceeded if an
 * operation is in progress.
 *
 * On return, op->hcall_err holds the return code from the last h_cop_op
 * call, or 0 if an error not involving the h_call was encountered.
 *
 * Returns:
 *	0 on success,
 *	-EINVAL if the h_call fails due to an invalid parameter,
 *	-E2BIG if the h_call can not be performed synchronously,
 *	-EBUSY if a timeout is specified and has elapsed,
 *	-EACCES if the memory area for data/status has been rescinded, or
 *	-EPERM if a hardware fault has been indicated
 */
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
	struct device *dev = &vdev->dev;
	unsigned long deadline = 0;
	long hret = 0;
	int ret = 0;

	if (op->timeout)
		deadline = jiffies + msecs_to_jiffies(op->timeout);

	while (true) {
		hret = plpar_hcall_norets(H_COP, op->flags,
				vdev->resource_id,
				op->in, op->inlen, op->out,
				op->outlen, op->csbcpb);

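		/*
		 * Stop retrying on success, on any return other than the
		 * transient busy/resource indications, or once a requested
		 * timeout has expired.
		 */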
		if (hret == H_SUCCESS ||
		    (hret != H_NOT_ENOUGH_RESOURCES &&
		     hret != H_BUSY && hret != H_RESOURCE) ||
		    (op->timeout && time_after(jiffies, deadline)))
			break;

		dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
	}

	switch (hret) {
	case H_SUCCESS:
		ret = 0;
		break;
	case H_OP_MODE:
	case H_TOO_BIG:
		ret = -E2BIG;
		break;
	case H_RESCINDED:
		ret = -EACCES;
		break;
	case H_HARDWARE:
		ret = -EPERM;
		break;
	case H_NOT_ENOUGH_RESOURCES:
	case H_RESOURCE:
	case H_BUSY:
		ret = -EBUSY;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
			__func__, ret, hret);

	op->hcall_err = hret;
	return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);

static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
	const __be32 *dma_window;
	struct iommu_table *tbl;
	unsigned long offset, size;

	dma_window = of_get_property(dev->dev.of_node,
				     "ibm,my-dma-window", NULL);
	if (!dma_window)
		return NULL;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return NULL;

	kref_init(&tbl->it_kref);

	of_parse_dma_window(dev->dev.of_node, dma_window,
			    &tbl->it_index, &offset, &size);
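	/*
	 * The DMA window property supplies the TCE table index and the
	 * window's offset and size in bytes; the latter two are converted
	 * into 4K TCE-entry units below.
	 */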

	/* TCE table size - measured in tce entries */
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_size = size >> tbl->it_page_shift;
	/* offset for VIO should always be 0 */
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_busno = 0;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		tbl->it_ops = &iommu_table_lpar_multi_ops;
	else
		tbl->it_ops = &iommu_table_pseries_ops;

	return iommu_init_table(tbl, -1, 0, 0);
}

/**
 * vio_match_device - Tell if a VIO device has a matching
 *		      VIO device id structure.
 * @ids: array of VIO device id structures to search in
 * @dev: the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices.  Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
		const struct vio_device_id *ids, const struct vio_dev *dev)
{
	while (ids->type[0] != '\0') {
		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
		    of_device_is_compatible(dev->dev.of_node,
					    ids->compat))
			return ids;
		ids++;
	}
	return NULL;
}

/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	const struct vio_device_id *id;
	int error = -ENODEV;

	if (!viodrv->probe)
		return error;

	id = vio_match_device(viodrv->id_table, viodev);
	if (id) {
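		/*
		 * Start from clean CMO accounting; when the CMO firmware
		 * feature is active, entitlement is set up before the
		 * driver's probe routine runs and torn down if it fails.
		 */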
		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			error = vio_cmo_bus_probe(viodev);
			if (error)
				return error;
		}
		error = viodrv->probe(viodev, id);
		if (error && firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_bus_remove(viodev);
	}

	return error;
}

/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	struct device *devptr;
	int ret = 1;

	/*
	 * Hold a reference to the device after the remove function is called
	 * to allow for CMO accounting cleanup for the device.
	 */
	devptr = get_device(dev);

	if (viodrv->remove)
		ret = viodrv->remove(viodev);

	if (!ret && firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_remove(viodev);

	put_device(devptr);
	return ret;
}

/**
 * vio_register_driver - Register a new vio driver
 * @viodrv: The vio_driver structure to be registered.
 */
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
			  const char *mod_name)
{
	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);

	/* fill in 'struct driver' fields */
	viodrv->driver.name = viodrv->name;
	viodrv->driver.pm = viodrv->pm;
	viodrv->driver.bus = &vio_bus_type;
	viodrv->driver.owner = owner;
	viodrv->driver.mod_name = mod_name;

	return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);

/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv: The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);

/* vio_dev refcount hit 0 */
static void vio_dev_release(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (tbl)
		iommu_tce_table_put(tbl);
	of_node_put(dev->of_node);
	kfree(to_vio_dev(dev));
}

/**
 * vio_register_device_node - Register a new vio device.
 * @of_node: The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;
	struct device_node *parent_node;
	const __be32 *prop;
	enum vio_dev_family family;

	/*
	 * Determine if this node is under the /vdevice node or under the
	 * /ibm,platform-facilities node.  This decides the device's family.
	 */
	parent_node = of_get_parent(of_node);
	if (parent_node) {
		if (of_node_is_type(parent_node, "ibm,platform-facilities"))
			family = PFO;
		else if (of_node_is_type(parent_node, "vdevice"))
			family = VDEVICE;
		else {
			pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
				__func__,
				parent_node,
				of_node);
			of_node_put(parent_node);
			return NULL;
		}
		of_node_put(parent_node);
	} else {
		pr_warn("%s: could not determine the parent of node %pOFn.\n",
			__func__, of_node);
		return NULL;
	}

	if (family == PFO) {
		if (of_get_property(of_node, "interrupt-controller", NULL)) {
			pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
				 __func__, of_node);
			return NULL;
		}
	}

	/* allocate a vio_dev for this node */
	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL) {
		pr_warn("%s: allocation failure for VIO device.\n", __func__);
		return NULL;
	}

	/* we need the 'device_type' property, in order to match with drivers */
	viodev->family = family;
	if (viodev->family == VDEVICE) {
		unsigned int unit_address;

		viodev->type = of_node_get_device_type(of_node);
		if (!viodev->type) {
			pr_warn("%s: node %pOFn is missing the 'device_type' "
				"property.\n", __func__, of_node);
			goto out;
		}

		prop = of_get_property(of_node, "reg", NULL);
		if (prop == NULL) {
			pr_warn("%s: node %pOFn missing 'reg'\n",
				__func__, of_node);
			goto out;
		}
		unit_address = of_read_number(prop, 1);
		dev_set_name(&viodev->dev, "%x", unit_address);
		viodev->irq = irq_of_parse_and_map(of_node, 0);
		viodev->unit_address = unit_address;
	} else {
		/* PFO devices need their resource_id for submitting COP_OPs
		 * This is an optional field for devices, but is required when
		 * performing synchronous ops */
		prop = of_get_property(of_node, "ibm,resource-id", NULL);
		if (prop != NULL)
			viodev->resource_id = of_read_number(prop, 1);

		dev_set_name(&viodev->dev, "%pOFn", of_node);
		viodev->type = dev_name(&viodev->dev);
		viodev->irq = 0;
	}

	viodev->name = of_node->name;
	viodev->dev.of_node = of_node_get(of_node);

	set_dev_node(&viodev->dev, of_node_to_nid(of_node));

	/* init generic 'struct device' fields: */
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;

	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
		if (firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_set_dma_ops(viodev);
		else
			set_dma_ops(&viodev->dev, &dma_iommu_ops);

		set_iommu_table_base(&viodev->dev,
				     vio_build_iommu_table(viodev));

		/* needed to ensure proper operation of coherent allocations
		 * later, in case driver doesn't set it explicitly */
		viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
		viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
	}

	/* register with generic device framework */
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
		       __func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
		return NULL;
	}

	return viodev;

out:	/* Use this exit point for any return prior to device_register */
	kfree(viodev);

	return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);

/*
 * vio_bus_scan_register_devices - Scan OF and register each child device
 * @root_name: OF node name for the root of the subtree to search.
 *             This must be non-NULL.
 *
 * Starting from the root node provided, register the device node for
 * each child beneath the root.
 */
static void vio_bus_scan_register_devices(char *root_name)
{
	struct device_node *node_root, *node_child;

	if (!root_name)
		return;

	node_root = of_find_node_by_name(NULL, root_name);
	if (node_root) {

		/*
		 * Create struct vio_devices for each virtual device in
		 * the device tree.  Drivers will associate with them later.
		 */
		node_child = of_get_next_child(node_root, NULL);
		while (node_child) {
			vio_register_device_node(node_child);
			node_child = of_get_next_child(node_root, node_child);
		}
		of_node_put(node_root);
	}
}

/**
 * vio_bus_init - Initialize the virtual IO bus
 */
static int __init vio_bus_init(void)
{
	int err;

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_sysfs_init();

	err = bus_register(&vio_bus_type);
	if (err) {
		printk(KERN_ERR "failed to register VIO bus\n");
		return err;
	}

	/*
	 * The fake parent of all vio devices, just to give us
	 * a nice directory
	 */
	err = device_register(&vio_bus_device.dev);
	if (err) {
		printk(KERN_WARNING "%s: device_register returned %i\n",
		       __func__, err);
		return err;
	}

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_init();

	return 0;
}
machine_postcore_initcall(pseries, vio_bus_init);

static int __init vio_device_init(void)
{
	vio_bus_scan_register_devices("vdevice");
	vio_bus_scan_register_devices("ibm,platform-facilities");

	return 0;
}
machine_device_initcall(pseries, vio_device_init);

static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct device_node *of_node = dev->of_node;

	return sprintf(buf, "%pOF\n", of_node);
}
static DEVICE_ATTR_RO(devspec);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn) {
		strcpy(buf, "\n");
		return strlen(buf);
	}
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp) {
		strcpy(buf, "\n");
		return strlen(buf);
	}

	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *vio_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_dev);

void vio_unregister_device(struct vio_dev *viodev)
{
	device_unregister(&viodev->dev);
	if (viodev->family == VDEVICE)
		irq_dispose_mapping(viodev->irq);
}
EXPORT_SYMBOL(vio_unregister_device);

static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct vio_driver *vio_drv = to_vio_driver(drv);
	const struct vio_device_id *ids = vio_drv->id_table;

	return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}

static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
	return 0;
}

struct bus_type vio_bus_type = {
	.name = "vio",
	.dev_groups = vio_dev_groups,
	.uevent = vio_hotplug,
	.match = vio_bus_match,
	.probe = vio_bus_probe,
	.remove = vio_bus_remove,
};

/**
 * vio_get_attribute - get attribute for virtual device
 * @vdev: The vio device to get property.
 * @which: The property/attribute to be extracted.
 * @length: Pointer to length of returned data size (unused if NULL).
 *
 * Calls prom.c's of_get_property() to return the value of the
 * attribute specified by @which
 */
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
	return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);

/* vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
	struct device *found;

	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
	if (!found)
		return NULL;

	return to_vio_dev(found);
}

/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 *
 * Takes a reference to the embedded struct device which needs to be dropped
 * after use.
 */
struct vio_dev *vio_find_node(struct device_node *vnode)
{
	char kobj_name[20];
	struct device_node *vnode_parent;

	vnode_parent = of_get_parent(vnode);
	if (!vnode_parent)
		return NULL;

	/* construct the kobject name from the device node */
	if (of_node_is_type(vnode_parent, "vdevice")) {
		const __be32 *prop;

		prop = of_get_property(vnode, "reg", NULL);
		if (!prop)
			goto out;
		snprintf(kobj_name, sizeof(kobj_name), "%x",
			 (uint32_t)of_read_number(prop, 1));
	} else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
		snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
	else
		goto out;

	of_node_put(vnode_parent);
	return vio_find_name(kobj_name);
out:
	of_node_put(vnode_parent);
	return NULL;
}
EXPORT_SYMBOL(vio_find_node);

int vio_enable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);

static int __init vio_init(void)
{
	dma_debug_add_bus(&vio_bus_type);
	return 0;
}
machine_fs_initcall(pseries, vio_init);