// SPDX-License-Identifier: GPL-2.0-only
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg
 * implementation, the allocator can NOT be used in NMI handlers. So
 * code that uses the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 */
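
/*
 * Illustrative sketch (not code used by this file): a caller that relies
 * on the lockless fast path must create the pool and add all chunks up
 * front, so that gen_pool_alloc() never has to grow the pool later. The
 * SRAM address, size and granule order below are hypothetical.
 *
 *	static struct gen_pool *sram_pool;
 *
 *	static int __init sram_pool_init(void)
 *	{
 *		void __iomem *sram = ioremap(0x10000000, SZ_64K);
 *
 *		if (!sram)
 *			return -ENOMEM;
 *		sram_pool = gen_pool_create(5, -1);	// 32-byte granules
 *		if (!sram_pool)
 *			return -ENOMEM;
 *		// pool->lock is taken here, once, at init time.
 *		return gen_pool_add(sram_pool, (unsigned long)sram, SZ_64K, -1);
 *	}
 */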

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

/*
 * Atomically set the bits in @mask_to_set at @addr. Fails with -EBUSY
 * if any bit in the mask is already set; retries the cmpxchg on
 * conflicts with concurrent updaters.
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

/*
 * Atomically clear the bits in @mask_to_clear at @addr. Fails with
 * -EBUSY if any bit in the mask is already clear; retries the cmpxchg
 * on conflicts with concurrent updaters.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race on the same bit, the loser returns the number of bits
 * still to be set; on success 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race on the same bit, the loser returns the number of bits
 * still to be cleared; on success 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
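
/*
 * Example (illustrative only): @min_alloc_order is the log2 of the
 * allocation granule, so a pool whose smallest allocation unit should
 * be 256 bytes is created with:
 *
 *	struct gen_pool *pool = gen_pool_create(ilog2(256), NUMA_NO_NODE);
 *
 * NUMA_NO_NODE (-1) lets the pool structure live on any node.
 */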

/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		       size_t size, int nid, void *owner)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
		     BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	chunk->owner = owner;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
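
/*
 * Illustrative sketch: a driver programming a device with the bus address
 * of a buffer carved out of the pool. The register names are hypothetical,
 * and this only works for chunks that were added with a valid @phys (e.g.
 * via gen_pool_add_virt()):
 *
 *	unsigned long vaddr = gen_pool_alloc(pool, 256);
 *	phys_addr_t paddr;
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	paddr = gen_pool_virt_to_phys(pool, vaddr);
 *	writel(lower_32_bits(paddr), dev_base + DESC_ADDR_REG);
 */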

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an NMI-safe
 * cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		if (owner)
			*owner = chunk->owner;
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
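
/*
 * Illustrative sketch: callers normally reach this through gen_pool_alloc()
 * or gen_pool_alloc_algo(). A one-off aligned allocation can pass an
 * algorithm and its data explicitly (sizes hypothetical):
 *
 *	struct genpool_data_align align_data = { .align = 64 };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, 1024, gen_pool_first_fit_align,
 *				   &align_data);
 *	if (!addr)
 *		return -ENOMEM;
 */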

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an NMI-safe
 * cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
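
/*
 * Illustrative sketch: carving a 64-byte descriptor out of the pool and
 * handing its DMA address to hardware, then returning it on teardown.
 * This assumes the pool's physical addresses are bus-addressable, as with
 * typical on-chip SRAM:
 *
 *	dma_addr_t dma;
 *	void *desc = gen_pool_dma_alloc(pool, 64, &dma);
 *
 *	if (!desc)
 *		return -ENOMEM;
 *	// ... point the hardware at "dma", use "desc" from the CPU ...
 *	gen_pool_free(pool, (unsigned long)desc, 64);
 */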

/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
			 void **owner)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			if (owner)
				*owner = chunk->owner;
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);
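
/*
 * Illustrative sketch of the *_owner variants: a subsystem that multiplexes
 * several backing devices into one pool can stash a per-chunk cookie at
 * gen_pool_add_owner() time and recall it at alloc/free time. The names
 * below are hypothetical.
 *
 *	gen_pool_add_owner(pool, vaddr, paddr, SZ_4K, nid, my_dev);
 *	...
 *	void *owner;
 *	unsigned long addr = gen_pool_alloc_owner(pool, 128, &owner);
 *	// "owner" is now my_dev if the allocation came from that chunk
 *	...
 *	gen_pool_free_owner(pool, addr, 128, &owner);
 */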

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
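
/*
 * Illustrative sketch: a debug helper that prints every chunk's range.
 * The callback runs under rcu_read_lock(), so it must not sleep.
 *
 *	static void dump_chunk(struct gen_pool *pool,
 *			       struct gen_pool_chunk *chunk, void *data)
 *	{
 *		pr_info("chunk %lx-%lx, %ld bytes free\n",
 *			chunk->start_addr, chunk->end_addr,
 *			atomic_long_read(&chunk->avail));
 *	}
 *
 *	gen_pool_for_each_chunk(pool, dump_chunk, NULL);
 */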

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
		      size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Make the pool use @algo for subsequent memory allocations.
 * If @algo is NULL, gen_pool_first_fit is used as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
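
/*
 * Illustrative sketch: switching a pool to best-fit to reduce
 * fragmentation; all later gen_pool_alloc() calls then use it.
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */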

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment (a struct genpool_data_align)
 * @pool: pool to get order from
 * @start_addr: start address of the allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed region (a struct genpool_data_fixed)
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
					       start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
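
/*
 * Illustrative sketch: reserving the region starting at a fixed offset
 * into the pool; the offset must be a multiple of the allocation granule.
 * The offset and size below are hypothetical.
 *
 *	struct genpool_data_fixed fixed_data = { .offset = SZ_4K };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, 256, gen_pool_fixed_alloc,
 *				   &fixed_data);
 *	if (!addr)
 *		return -ENOMEM;
 */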

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * that can satisfy the allocation.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);

		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
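
/*
 * Illustrative sketch: a platform driver creating a managed pool in its
 * probe routine; no explicit gen_pool_destroy() is needed on the error or
 * unbind paths. The resource names (foo_probe, sram_vaddr) are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct gen_pool *pool;
 *
 *		pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *					    dev_to_node(&pdev->dev), "sram");
 *		if (IS_ERR(pool))
 *			return PTR_ERR(pool);
 *		return gen_pool_add(pool, sram_vaddr, SZ_64K, -1);
 *	}
 */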

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
				 const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif /* CONFIG_OF */
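
/*
 * Illustrative sketch: a consumer node pointing at an SRAM pool through a
 * phandle property (the binding, node names and property name below are
 * hypothetical):
 *
 *	sram: sram@10000000 {
 *		compatible = "mmio-sram";
 *		reg = <0x10000000 0x10000>;
 *	};
 *
 *	dma-engine@20000000 {
 *		...
 *		sram = <&sram>;
 *	};
 *
 * and the driver retrieving it:
 *
 *	struct gen_pool *pool = of_gen_pool_get(dev->of_node, "sram", 0);
 *
 *	if (!pool)
 *		return -EPROBE_DEFER;
 */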