Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fore200e: devirtualize dma alloc calls

There is no need for an indirection before calling the dma alloc
routines now that we store a struct device in struct fore200e.

Also remove the pointless GFP_ATOMIC for the sbus case, and fix up
the error handling by removing the 0 dma_addr test - some iommus
can return 0 as a perfectly valid bus address.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Christoph Hellwig and committed by
David S. Miller
1335d6fd f3fadcb5

+45 -85
+45 -83
drivers/atm/fore200e.c
··· 209 209 kfree(chunk->alloc_addr); 210 210 } 211 211 212 + /* 213 + * Allocate a DMA consistent chunk of memory intended to act as a communication 214 + * mechanism (to hold descriptors, status, queues, etc.) shared by the driver 215 + * and the adapter. 216 + */ 217 + static int 218 + fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk, 219 + int size, int nbr, int alignment) 220 + { 221 + /* returned chunks are page-aligned */ 222 + chunk->alloc_size = size * nbr; 223 + chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size, 224 + &chunk->dma_addr, GFP_KERNEL); 225 + if (!chunk->alloc_addr) 226 + return -ENOMEM; 227 + chunk->align_addr = chunk->alloc_addr; 228 + return 0; 229 + } 230 + 231 + /* 232 + * Free a DMA consistent chunk of memory. 233 + */ 234 + static void 235 + fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 236 + { 237 + dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr, 238 + chunk->dma_addr); 239 + } 212 240 213 241 static void 214 242 fore200e_spin(int msecs) ··· 329 301 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block; 330 302 331 303 if (status->alloc_addr) 332 - fore200e->bus->dma_chunk_free(fore200e, status); 304 + fore200e_dma_chunk_free(fore200e, status); 333 305 334 306 if (rbd_block->alloc_addr) 335 - fore200e->bus->dma_chunk_free(fore200e, rbd_block); 307 + fore200e_dma_chunk_free(fore200e, rbd_block); 336 308 } 337 309 } 338 310 } ··· 398 370 399 371 /* fall through */ 400 372 case FORE200E_STATE_INIT_RXQ: 401 - fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status); 402 - fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); 373 + fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status); 374 + fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); 403 375 404 376 /* fall through */ 405 377 case FORE200E_STATE_INIT_TXQ: 406 - fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status); 
407 - fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd); 378 + fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status); 379 + fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd); 408 380 409 381 /* fall through */ 410 382 case FORE200E_STATE_INIT_CMDQ: 411 - fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status); 383 + fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status); 412 384 413 385 /* fall through */ 414 386 case FORE200E_STATE_INITIALIZE: ··· 454 426 the endianess of slave RAM accesses */ 455 427 writel(cpu_to_le32(val), addr); 456 428 } 457 - 458 - /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism 459 - (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */ 460 - 461 - static int 462 - fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, 463 - int size, int nbr, int alignment) 464 - { 465 - /* returned chunks are page-aligned */ 466 - chunk->alloc_size = size * nbr; 467 - chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, 468 - chunk->alloc_size, 469 - &chunk->dma_addr, 470 - GFP_KERNEL); 471 - 472 - if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) 473 - return -ENOMEM; 474 - 475 - chunk->align_addr = chunk->alloc_addr; 476 - 477 - return 0; 478 - } 479 - 480 - 481 - /* free a DMA consistent chunk of memory */ 482 - 483 - static void 484 - fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 485 - { 486 - dma_free_coherent(fore200e->dev, 487 - chunk->alloc_size, 488 - chunk->alloc_addr, 489 - chunk->dma_addr); 490 - } 491 - 492 429 493 430 static int 494 431 fore200e_pca_irq_check(struct fore200e* fore200e) ··· 624 631 .status_alignment = 32, 625 632 .read = fore200e_pca_read, 626 633 .write = fore200e_pca_write, 627 - .dma_chunk_alloc = fore200e_pca_dma_chunk_alloc, 628 - .dma_chunk_free = fore200e_pca_dma_chunk_free, 629 634 .configure = fore200e_pca_configure, 630 635 .map = 
fore200e_pca_map, 631 636 .reset = fore200e_pca_reset, ··· 645 654 static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr) 646 655 { 647 656 sbus_writel(val, addr); 648 - } 649 - 650 - /* Allocate a DVMA consistent chunk of memory intended to act as a communication mechanism 651 - * (to hold descriptors, status, queues, etc.) shared by the driver and the adapter. 652 - */ 653 - static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk, 654 - int size, int nbr, int alignment) 655 - { 656 - chunk->alloc_size = size * nbr; 657 - 658 - /* returned chunks are page-aligned */ 659 - chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size, 660 - &chunk->dma_addr, GFP_ATOMIC); 661 - 662 - if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0)) 663 - return -ENOMEM; 664 - 665 - chunk->align_addr = chunk->alloc_addr; 666 - 667 - return 0; 668 - } 669 - 670 - /* free a DVMA consistent chunk of memory */ 671 - static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk) 672 - { 673 - dma_free_coherent(fore200e->dev, chunk->alloc_size, 674 - chunk->alloc_addr, chunk->dma_addr); 675 657 } 676 658 677 659 static void fore200e_sba_irq_enable(struct fore200e *fore200e) ··· 760 796 .status_alignment = 32, 761 797 .read = fore200e_sba_read, 762 798 .write = fore200e_sba_write, 763 - .dma_chunk_alloc = fore200e_sba_dma_chunk_alloc, 764 - .dma_chunk_free = fore200e_sba_dma_chunk_free, 765 799 .configure = fore200e_sba_configure, 766 800 .map = fore200e_sba_map, 767 801 .reset = fore200e_sba_reset, ··· 2073 2111 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 2074 2112 2075 2113 /* allocate and align the array of status words */ 2076 - if (fore200e->bus->dma_chunk_alloc(fore200e, 2114 + if (fore200e_dma_chunk_alloc(fore200e, 2077 2115 &bsq->status, 2078 2116 sizeof(enum status), 2079 2117 QUEUE_SIZE_BS, ··· 2082 2120 } 2083 2121 2084 2122 /* allocate and align the array of receive buffer descriptors */ 
2085 - if (fore200e->bus->dma_chunk_alloc(fore200e, 2123 + if (fore200e_dma_chunk_alloc(fore200e, 2086 2124 &bsq->rbd_block, 2087 2125 sizeof(struct rbd_block), 2088 2126 QUEUE_SIZE_BS, 2089 2127 fore200e->bus->descr_alignment) < 0) { 2090 2128 2091 - fore200e->bus->dma_chunk_free(fore200e, &bsq->status); 2129 + fore200e_dma_chunk_free(fore200e, &bsq->status); 2092 2130 return -ENOMEM; 2093 2131 } 2094 2132 ··· 2129 2167 DPRINTK(2, "receive queue is being initialized\n"); 2130 2168 2131 2169 /* allocate and align the array of status words */ 2132 - if (fore200e->bus->dma_chunk_alloc(fore200e, 2170 + if (fore200e_dma_chunk_alloc(fore200e, 2133 2171 &rxq->status, 2134 2172 sizeof(enum status), 2135 2173 QUEUE_SIZE_RX, ··· 2138 2176 } 2139 2177 2140 2178 /* allocate and align the array of receive PDU descriptors */ 2141 - if (fore200e->bus->dma_chunk_alloc(fore200e, 2179 + if (fore200e_dma_chunk_alloc(fore200e, 2142 2180 &rxq->rpd, 2143 2181 sizeof(struct rpd), 2144 2182 QUEUE_SIZE_RX, 2145 2183 fore200e->bus->descr_alignment) < 0) { 2146 2184 2147 - fore200e->bus->dma_chunk_free(fore200e, &rxq->status); 2185 + fore200e_dma_chunk_free(fore200e, &rxq->status); 2148 2186 return -ENOMEM; 2149 2187 } 2150 2188 ··· 2188 2226 DPRINTK(2, "transmit queue is being initialized\n"); 2189 2227 2190 2228 /* allocate and align the array of status words */ 2191 - if (fore200e->bus->dma_chunk_alloc(fore200e, 2229 + if (fore200e_dma_chunk_alloc(fore200e, 2192 2230 &txq->status, 2193 2231 sizeof(enum status), 2194 2232 QUEUE_SIZE_TX, ··· 2197 2235 } 2198 2236 2199 2237 /* allocate and align the array of transmit PDU descriptors */ 2200 - if (fore200e->bus->dma_chunk_alloc(fore200e, 2238 + if (fore200e_dma_chunk_alloc(fore200e, 2201 2239 &txq->tpd, 2202 2240 sizeof(struct tpd), 2203 2241 QUEUE_SIZE_TX, 2204 2242 fore200e->bus->descr_alignment) < 0) { 2205 2243 2206 - fore200e->bus->dma_chunk_free(fore200e, &txq->status); 2244 + fore200e_dma_chunk_free(fore200e, &txq->status); 2207 2245 
return -ENOMEM; 2208 2246 } 2209 2247 ··· 2250 2288 DPRINTK(2, "command queue is being initialized\n"); 2251 2289 2252 2290 /* allocate and align the array of status words */ 2253 - if (fore200e->bus->dma_chunk_alloc(fore200e, 2291 + if (fore200e_dma_chunk_alloc(fore200e, 2254 2292 &cmdq->status, 2255 2293 sizeof(enum status), 2256 2294 QUEUE_SIZE_CMD,
-2
drivers/atm/fore200e.h
··· 805 805 int status_alignment; /* status words DMA alignment requirement */ 806 806 u32 (*read)(volatile u32 __iomem *); 807 807 void (*write)(u32, volatile u32 __iomem *); 808 - int (*dma_chunk_alloc)(struct fore200e*, struct chunk*, int, int, int); 809 - void (*dma_chunk_free)(struct fore200e*, struct chunk*); 810 808 int (*configure)(struct fore200e*); 811 809 int (*map)(struct fore200e*); 812 810 void (*reset)(struct fore200e*);