[PATCH] spi: use linked lists rather than an array

This makes the SPI core and its users access the transfers in the SPI message
structure as a linked list rather than as an array, as discussed on LKML.

From: David Brownell <dbrownell@users.sourceforge.net>

Updates include documentation, bugfixes to the list-handling code, and the
addition of spi_message_add_tail(). Plus, initialize things _before_ grabbing
the locks in some cases (in case that ever grows more expensive). This also
merges some bitbang updates of mine that hadn't yet made it into the mm tree.

Signed-off-by: Vitaly Wool <vwool@ru.mvista.com>
Signed-off-by: Dmitry Pervushin <dpervushin@gmail.com>
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

Authored by Vitaly Wool, committed by Greg Kroah-Hartman (8275c642, 2f9f7628)

+180 -113
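For protocol drivers, the visible change is in how a message is composed:
instead of pointing spi_message.transfers at an array and setting n_transfer,
each spi_transfer is linked onto the message's transfer list. A minimal sketch
of the new usage (the two-transfer command/response layout and the cmd, buf
and len names are illustrative, not taken from any one driver below):

        struct spi_transfer x[2];
        struct spi_message m;
        int status;

        memset(x, 0, sizeof x);
        spi_message_init(&m);           /* zero the message, init its transfer list */

        x[0].tx_buf = cmd;              /* command bytes shifted out first */
        x[0].len = sizeof cmd;
        spi_message_add_tail(&x[0], &m);

        x[1].rx_buf = buf;              /* response read under the same chipselect */
        x[1].len = len;
        spi_message_add_tail(&x[1], &m);

        status = spi_sync(spi, &m);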
+8 -4
drivers/input/touchscreen/ads7846.c
···
 	struct ser_req *req = kzalloc(sizeof *req, SLAB_KERNEL);
 	int status;
 	int sample;
+	int i;
 
 	if (!req)
 		return -ENOMEM;
+
+	INIT_LIST_HEAD(&req->msg.transfers);
 
 	/* activate reference, so it has time to settle; */
 	req->xfer[0].tx_buf = &ref_on;
···
 	/* group all the transfers together, so we can't interfere with
 	 * reading touchscreen state; disable penirq while sampling
 	 */
-	req->msg.transfers = req->xfer;
-	req->msg.n_transfer = 6;
+	for (i = 0; i < 6; i++)
+		spi_message_add_tail(&req->xfer[i], &req->msg);
 
 	disable_irq(spi->irq);
 	status = spi_sync(spi, &req->msg);
···
 	struct ads7846 *ts;
 	struct ads7846_platform_data *pdata = spi->dev.platform_data;
 	struct spi_transfer *x;
+	int i;
 
 	if (!spi->irq) {
 		dev_dbg(&spi->dev, "no IRQ?\n");
···
 
 	CS_CHANGE(x[-1]);
 
-	ts->msg.transfers = ts->xfer;
-	ts->msg.n_transfer = x - ts->xfer;
+	for (i = 0; i < x - ts->xfer; i++)
+		spi_message_add_tail(&ts->xfer[i], &ts->msg);
 	ts->msg.complete = ads7846_rx;
 	ts->msg.context = ts;
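Note that ads7846 calls INIT_LIST_HEAD() directly instead of spi_message_init():
the enclosing ser_req comes from kzalloc(), so the embedded message is already
zeroed and only the list head needs setting up. The two options, side by side
(req and msg here stand for any embedded and on-stack message respectively):

        /* embedded in kzalloc()ed storage: list head init is enough */
        INIT_LIST_HEAD(&req->msg.transfers);

        /* on-stack or reused message: spi_message_init() re-zeroes it too */
        spi_message_init(&msg);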
+25 -25
drivers/mtd/devices/m25p80.c
···
 	if (from + len > flash->mtd.size)
 		return -EINVAL;
 
+	spi_message_init(&m);
+	memset(t, 0, (sizeof t));
+
+	t[0].tx_buf = flash->command;
+	t[0].len = sizeof(flash->command);
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].rx_buf = buf;
+	t[1].len = len;
+	spi_message_add_tail(&t[1], &m);
+
+	/* Byte count starts at zero. */
+	if (retlen)
+		*retlen = 0;
+
 	down(&flash->lock);
 
 	/* Wait till previous write/erase is done. */
···
 		return 1;
 	}
 
-	memset(t, 0, (sizeof t));
-
 	/* NOTE: OPCODE_FAST_READ (if available) is faster... */
 
 	/* Set up the write data buffer. */
···
 	flash->command[1] = from >> 16;
 	flash->command[2] = from >> 8;
 	flash->command[3] = from;
-
-	/* Byte count starts at zero. */
-	if (retlen)
-		*retlen = 0;
-
-	t[0].tx_buf = flash->command;
-	t[0].len = sizeof(flash->command);
-
-	t[1].rx_buf = buf;
-	t[1].len = len;
-
-	m.transfers = t;
-	m.n_transfer = 2;
 
 	spi_sync(flash->spi, &m);
 
···
 	if (to + len > flash->mtd.size)
 		return -EINVAL;
 
+	spi_message_init(&m);
+	memset(t, 0, (sizeof t));
+
+	t[0].tx_buf = flash->command;
+	t[0].len = sizeof(flash->command);
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].tx_buf = buf;
+	spi_message_add_tail(&t[1], &m);
+
 	down(&flash->lock);
 
 	/* Wait until finished previous write command. */
···
 
 	write_enable(flash);
 
-	memset(t, 0, (sizeof t));
-
 	/* Set up the opcode in the write buffer. */
 	flash->command[0] = OPCODE_PP;
 	flash->command[1] = to >> 16;
 	flash->command[2] = to >> 8;
 	flash->command[3] = to;
 
-	t[0].tx_buf = flash->command;
-	t[0].len = sizeof(flash->command);
-
-	m.transfers = t;
-	m.n_transfer = 2;
-
 	/* what page do we start with? */
 	page_offset = to % FLASH_PAGESIZE;
 
 	/* do all the bytes fit onto one page? */
 	if (page_offset + len <= FLASH_PAGESIZE) {
-		t[1].tx_buf = buf;
 		t[1].len = len;
 
 		spi_sync(flash->spi, &m);
···
 		/* the size of data remaining on the first page */
 		page_size = FLASH_PAGESIZE - page_offset;
 
-		t[1].tx_buf = buf;
 		t[1].len = page_size;
 		spi_sync(flash->spi, &m);
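These m25p80 hunks also show the "initialize before grabbing the locks" point
from the changelog: the message is now fully composed before down(&flash->lock),
and the write path's page loop then only re-aims t[1] between spi_sync() calls,
with the list itself never changing. Roughly, as a sketch (the loop is condensed
and the address bookkeeping elided; it is not the literal driver code):

        /* t[0] = opcode + address, t[1] = data; both already on &m */
        while (len > 0) {
                t[1].tx_buf = buf;              /* re-aim the same transfer */
                t[1].len = page_size;
                spi_sync(flash->spi, &m);       /* list membership unchanged */
                buf += page_size;
                len -= page_size;
                /* ... update flash->command[] for the next page ... */
        }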
+17 -11
drivers/mtd/devices/mtd_dataflash.c
···
 {
 	struct dataflash *priv = (struct dataflash *)mtd->priv;
 	struct spi_device *spi = priv->spi;
-	struct spi_transfer x[1] = { { .tx_dma = 0, }, };
+	struct spi_transfer x = { .tx_dma = 0, };
 	struct spi_message msg;
 	unsigned blocksize = priv->page_size << 3;
 	u8 *command;
···
 		|| (instr->addr % priv->page_size) != 0)
 		return -EINVAL;
 
-	x[0].tx_buf = command = priv->command;
-	x[0].len = 4;
-	msg.transfers = x;
-	msg.n_transfer = 1;
+	spi_message_init(&msg);
+
+	x.tx_buf = command = priv->command;
+	x.len = 4;
+	spi_message_add_tail(&x, &msg);
 
 	down(&priv->lock);
 	while (instr->len > 0) {
···
 	DEBUG(MTD_DEBUG_LEVEL3, "READ: (%x) %x %x %x\n",
 		command[0], command[1], command[2], command[3]);
 
+	spi_message_init(&msg);
+
 	x[0].tx_buf = command;
 	x[0].len = 8;
+	spi_message_add_tail(&x[0], &msg);
+
 	x[1].rx_buf = buf;
 	x[1].len = len;
-	msg.transfers = x;
-	msg.n_transfer = 2;
+	spi_message_add_tail(&x[1], &msg);
 
 	down(&priv->lock);
 
···
 	if ((to + len) > mtd->size)
 		return -EINVAL;
 
+	spi_message_init(&msg);
+
 	x[0].tx_buf = command = priv->command;
 	x[0].len = 4;
+	spi_message_add_tail(&x[0], &msg);
 
 	pageaddr = ((unsigned)to / priv->page_size);
 	offset = ((unsigned)to % priv->page_size);
···
 	DEBUG(MTD_DEBUG_LEVEL3, "TRANSFER: (%x) %x %x %x\n",
 		command[0], command[1], command[2], command[3]);
 
-	msg.n_transfer = 1;
 	status = spi_sync(spi, &msg);
 	if (status < 0)
 		DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
···
 
 	x[1].tx_buf = writebuf;
 	x[1].len = writelen;
-	msg.n_transfer = 2;
+	spi_message_add_tail(x + 1, &msg);
 	status = spi_sync(spi, &msg);
+	spi_transfer_del(x + 1);
 	if (status < 0)
 		DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
 			spi->dev.bus_id, addr, writelen, status);
 
 	(void) dataflash_waitready(priv->spi);
 
 #ifdef CONFIG_DATAFLASH_WRITE_VERIFY
···
 	DEBUG(MTD_DEBUG_LEVEL3, "COMPARE: (%x) %x %x %x\n",
 		command[0], command[1], command[2], command[3]);
 
-	msg.n_transfer = 1;
 	status = spi_sync(spi, &msg);
 	if (status < 0)
 		DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
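dataflash is the one caller here that needs a variable-shape message: the
program path appends its data transfer for just one spi_sync(), then unlinks
it again so the single-transfer command message can be reused. That is exactly
what spi_transfer_del() exists for; with the old API this was a bare
n_transfer = 2 / n_transfer = 1 toggle. The pattern in isolation:

        spi_message_add_tail(x + 1, &msg);      /* temporarily two transfers */
        status = spi_sync(spi, &msg);
        spi_transfer_del(x + 1);                /* back to the one-transfer message */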
+11 -7
drivers/spi/spi.c
···
 	if ((n_tx + n_rx) > SPI_BUFSIZ)
 		return -EINVAL;
 
+	spi_message_init(&message);
+	memset(x, 0, sizeof x);
+	if (n_tx) {
+		x[0].len = n_tx;
+		spi_message_add_tail(&x[0], &message);
+	}
+	if (n_rx) {
+		x[1].len = n_rx;
+		spi_message_add_tail(&x[1], &message);
+	}
+
 	/* ... unless someone else is using the pre-allocated buffer */
 	if (down_trylock(&lock)) {
 		local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
···
 	} else
 		local_buf = buf;
 
-	memset(x, 0, sizeof x);
-
 	memcpy(local_buf, txbuf, n_tx);
 	x[0].tx_buf = local_buf;
-	x[0].len = n_tx;
-
 	x[1].rx_buf = local_buf + n_tx;
-	x[1].len = n_rx;
 
 	/* do the i/o */
-	message.transfers = x;
-	message.n_transfer = ARRAY_SIZE(x);
 	status = spi_sync(spi, &message);
 	if (status == 0) {
 		memcpy(rxbuf, x[1].rx_buf, n_rx);
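A side benefit visible in spi_write_then_read(): a zero-length half is simply
never linked into the message, so controller drivers no longer see len == 0
transfers. A write-only or read-only call just yields a one-transfer message,
e.g. (cmd here is a hypothetical command buffer, not from the diff):

        /* write-only: only x[0] is added, the rx transfer is skipped */
        status = spi_write_then_read(spi, cmd, sizeof cmd, NULL, 0);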
+49 -37
drivers/spi/spi_bitbang.c
···
 	struct spi_bitbang_cs *cs = spi->controller_state;
 	struct spi_bitbang *bitbang;
 
+	if (!spi->max_speed_hz)
+		return -EINVAL;
+
 	if (!cs) {
 		cs = kzalloc(sizeof *cs, SLAB_KERNEL);
 		if (!cs)
···
 	if (!cs->txrx_word)
 		return -EINVAL;
 
-	if (!spi->max_speed_hz)
-		spi->max_speed_hz = 500 * 1000;
-
-	/* nsecs = max(50, (clock period)/2), be optimistic */
+	/* nsecs = (clock period)/2 */
 	cs->nsecs = (1000000000/2) / (spi->max_speed_hz);
-	if (cs->nsecs < 50)
-		cs->nsecs = 50;
 	if (cs->nsecs > MAX_UDELAY_MS * 1000)
 		return -EINVAL;
 
···
 	/* deselect chip (low or high) */
 	spin_lock(&bitbang->lock);
 	if (!bitbang->busy) {
-		bitbang->chipselect(spi, 0);
+		bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
 		ndelay(cs->nsecs);
 	}
 	spin_unlock(&bitbang->lock);
···
 	struct spi_message *m;
 	struct spi_device *spi;
 	unsigned nsecs;
-	struct spi_transfer *t;
+	struct spi_transfer *t = NULL;
 	unsigned tmp;
-	unsigned chipselect;
+	unsigned cs_change;
 	int status;
 
 	m = container_of(bitbang->queue.next, struct spi_message,
···
 		list_del_init(&m->queue);
 		spin_unlock_irqrestore(&bitbang->lock, flags);
 
-		// FIXME this is made-up
-		nsecs = 100;
+		/* FIXME this is made-up ... the correct value is known to
+		 * word-at-a-time bitbang code, and presumably chipselect()
+		 * should enforce these requirements too?
+		 */
+		nsecs = 100;
 
 		spi = m->spi;
-		t = m->transfers;
 		tmp = 0;
-		chipselect = 0;
+		cs_change = 1;
 		status = 0;
 
-		for (;;t++) {
+		list_for_each_entry (t, &m->transfers, transfer_list) {
 			if (bitbang->shutdown) {
 				status = -ESHUTDOWN;
 				break;
 			}
 
-			/* set up default clock polarity, and activate chip */
-			if (!chipselect) {
-				bitbang->chipselect(spi, 1);
+			/* set up default clock polarity, and activate chip;
+			 * this implicitly updates clock and spi modes as
+			 * previously recorded for this device via setup().
+			 * (and also deselects any other chip that might be
+			 * selected ...)
+			 */
+			if (cs_change) {
+				bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
 				ndelay(nsecs);
 			}
+			cs_change = t->cs_change;
 			if (!t->tx_buf && !t->rx_buf && t->len) {
 				status = -EINVAL;
 				break;
 			}
 
-			/* transfer data */
+			/* transfer data.  the lower level code handles any
+			 * new dma mappings it needs.  our caller always gave
+			 * us dma-safe buffers.
+			 */
 			if (t->len) {
-				/* FIXME if bitbang->use_dma, dma_map_single()
-				 * before the transfer, and dma_unmap_single()
-				 * afterwards, for either or both buffers...
+				/* REVISIT dma API still needs a designated
+				 * DMA_ADDR_INVALID; ~0 might be better.
 				 */
+				if (!m->is_dma_mapped)
+					t->rx_dma = t->tx_dma = 0;
 				status = bitbang->txrx_bufs(spi, t);
 			}
 			if (status != t->len) {
···
 			if (t->delay_usecs)
 				udelay(t->delay_usecs);
 
-			tmp++;
-			if (tmp >= m->n_transfer)
+			if (!cs_change)
+				continue;
+			if (t->transfer_list.next == &m->transfers)
 				break;
 
-			chipselect = !t->cs_change;
-			if (chipselect);
-				continue;
-
-			bitbang->chipselect(spi, 0);
-
-			/* REVISIT do we want the udelay here instead? */
-			msleep(1);
+			/* sometimes a short mid-message deselect of the chip
+			 * may be needed to terminate a mode or command
+			 */
+			ndelay(nsecs);
+			bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+			ndelay(nsecs);
 		}
-
-		tmp = m->n_transfer - 1;
-		tmp = m->transfers[tmp].cs_change;
 
 		m->status = status;
 		m->complete(m->context);
 
-		ndelay(2 * nsecs);
-		bitbang->chipselect(spi, status == 0 && tmp);
-		ndelay(nsecs);
+		/* normally deactivate chipselect ... unless no error and
+		 * cs_change has hinted that the next message will probably
+		 * be for this chip too.
+		 */
+		if (!(status == 0 && cs_change)) {
+			ndelay(nsecs);
+			bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+			ndelay(nsecs);
+		}
 
 		spin_lock_irqsave(&bitbang->lock, flags);
 	}
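For controller drivers, the message walk becomes a list iteration, and "is this
the last transfer?" becomes a check of the node's next pointer against the list
head instead of an index comparison. The skeleton of the new loop, with status
checks and the chipselect handling stripped out:

        struct spi_transfer *t;

        list_for_each_entry(t, &m->transfers, transfer_list) {
                status = bitbang->txrx_bufs(spi, t);

                /* detect the final transfer without a counter */
                if (t->transfer_list.next == &m->transfers)
                        break;
                /* otherwise, optionally toggle chipselect between transfers */
        }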
+63 -29
include/linux/spi/spi.h
···
 /**
  * struct spi_transfer - a read/write buffer pair
- * @tx_buf: data to be written (dma-safe address), or NULL
- * @rx_buf: data to be read (dma-safe address), or NULL
- * @tx_dma: DMA address of buffer, if spi_message.is_dma_mapped
- * @rx_dma: DMA address of buffer, if spi_message.is_dma_mapped
+ * @tx_buf: data to be written (dma-safe memory), or NULL
+ * @rx_buf: data to be read (dma-safe memory), or NULL
+ * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped
+ * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped
  * @len: size of rx and tx buffers (in bytes)
  * @cs_change: affects chipselect after this transfer completes
  * @delay_usecs: microseconds to delay after this transfer before
  *	(optionally) changing the chipselect status, then starting
  *	the next transfer or completing this spi_message.
+ * @transfer_list: transfers are sequenced through spi_message.transfers
  *
  * SPI transfers always write the same number of bytes as they read.
  * Protocol drivers should always provide rx_buf and/or tx_buf.
···
  * the data being transferred; that may reduce overhead, when the
  * underlying driver uses dma.
  *
- * All SPI transfers start with the relevant chipselect active.  Drivers
- * can change behavior of the chipselect after the transfer finishes
- * (including any mandatory delay).  The normal behavior is to leave it
- * selected, except for the last transfer in a message.  Setting cs_change
- * allows two additional behavior options:
+ * If the transmit buffer is null, undefined data will be shifted out
+ * while filling rx_buf.  If the receive buffer is null, the data
+ * shifted in will be discarded.  Only "len" bytes shift out (or in).
+ * It's an error to try to shift out a partial word.  (For example, by
+ * shifting out three bytes with word size of sixteen or twenty bits;
+ * the former uses two bytes per word, the latter uses four bytes.)
+ *
+ * All SPI transfers start with the relevant chipselect active.  Normally
+ * it stays selected until after the last transfer in a message.  Drivers
+ * can affect the chipselect signal using cs_change:
  *
  * (i) If the transfer isn't the last one in the message, this flag is
  * used to make the chipselect briefly go inactive in the middle of the
···
  * The code that submits an spi_message (and its spi_transfers)
  * to the lower layers is responsible for managing its memory.
  * Zero-initialize every field you don't set up explicitly, to
- * insulate against future API updates.
+ * insulate against future API updates.  After you submit a message
+ * and its transfers, ignore them until its completion callback.
  */
 struct spi_transfer {
 	/* it's ok if tx_buf == rx_buf (right?)
···
 
 	unsigned	cs_change:1;
 	u16		delay_usecs;
+
+	struct list_head transfer_list;
 };
 
···
 /**
  * struct spi_message - one multi-segment SPI transaction
- * @transfers: the segements of the transaction
- * @n_transfer: how many segments
+ * @transfers: list of transfer segments in this transaction
  * @spi: SPI device to which the transaction is queued
  * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
  *	addresses for each transfer buffer
···
  * @queue: for use by whichever driver currently owns the message
  * @state: for use by whichever driver currently owns the message
  *
+ * An spi_message is used to execute an atomic sequence of data transfers,
+ * each represented by a struct spi_transfer.  The sequence is "atomic"
+ * in the sense that no other spi_message may use that SPI bus until that
+ * sequence completes.  On some systems, many such sequences can execute
+ * as a single programmed DMA transfer.  On all systems, these messages
+ * are queued, and might complete after transactions to other devices.
+ * Messages sent to a given spi_device are always executed in FIFO order.
+ *
  * The code that submits an spi_message (and its spi_transfers)
  * to the lower layers is responsible for managing its memory.
  * Zero-initialize every field you don't set up explicitly, to
- * insulate against future API updates.
+ * insulate against future API updates.  After you submit a message
+ * and its transfers, ignore them until its completion callback.
  */
 struct spi_message {
-	struct spi_transfer	*transfers;
-	unsigned		n_transfer;
+	struct list_head	transfers;
 
 	struct spi_device	*spi;
···
 	void			*state;
 };
 
+static inline void spi_message_init(struct spi_message *m)
+{
+	memset(m, 0, sizeof *m);
+	INIT_LIST_HEAD(&m->transfers);
+}
+
+static inline void
+spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
+{
+	list_add_tail(&t->transfer_list, &m->transfers);
+}
+
+static inline void
+spi_transfer_del(struct spi_transfer *t)
+{
+	list_del(&t->transfer_list);
+}
+
 /* It's fine to embed message and transaction structures in other data
  * structures so long as you don't free them while they're in use.
  */
···
 			+ ntrans * sizeof(struct spi_transfer),
 			flags);
 	if (m) {
-		m->transfers = (void *)(m + 1);
-		m->n_transfer = ntrans;
+		int i;
+		struct spi_transfer *t = (struct spi_transfer *)(m + 1);
+
+		INIT_LIST_HEAD(&m->transfers);
+		for (i = 0; i < ntrans; i++, t++)
+			spi_message_add_tail(t, m);
 	}
 	return m;
 }
···
  * device doesn't work with the mode 0 default.  They may likewise need
  * to update clock rates or word sizes from initial values.  This function
  * changes those settings, and must be called from a context that can sleep.
+ * The changes take effect the next time the device is selected and data
+ * is transferred to or from it.
  */
 static inline int
 spi_setup(struct spi_device *spi)
···
 {
 	struct spi_transfer	t = {
 			.tx_buf		= buf,
-			.rx_buf		= NULL,
 			.len		= len,
-			.cs_change	= 0,
 		};
-	struct spi_message	m = {
-			.transfers	= &t,
-			.n_transfer	= 1,
-		};
+	struct spi_message	m;
 
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
 	return spi_sync(spi, &m);
 }
···
 spi_read(struct spi_device *spi, u8 *buf, size_t len)
 {
 	struct spi_transfer	t = {
-			.tx_buf		= NULL,
 			.rx_buf		= buf,
 			.len		= len,
-			.cs_change	= 0,
 		};
-	struct spi_message	m = {
-			.transfers	= &t,
-			.n_transfer	= 1,
-		};
+	struct spi_message	m;
 
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
 	return spi_sync(spi, &m);
 }
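spi_message_alloc() keeps its single-allocation layout, but the transfers that
follow the message in memory are now pre-linked onto the list instead of being
pointed at, so callers still reach them in order. A hypothetical caller sketch
(example_user and the fixed 4-byte layout are made up; spi_message_free() is
the existing matching release helper):

        static int example_user(struct spi_device *spi, const u8 *cmd /* dma-safe */)
        {
                struct spi_message *m = spi_message_alloc(2, GFP_KERNEL);
                struct spi_transfer *t;
                int status;

                if (!m)
                        return -ENOMEM;
                /* both transfers are already on m->transfers, in order */
                list_for_each_entry(t, &m->transfers, transfer_list) {
                        t->tx_buf = cmd;
                        t->len = 4;
                }
                status = spi_sync(spi, m);
                spi_message_free(m);
                return status;
        }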
+7
include/linux/spi/spi_bitbang.h
···
 	struct spi_master	*master;
 
 	void	(*chipselect)(struct spi_device *spi, int is_on);
+#define	BITBANG_CS_ACTIVE	1	/* normally nCS, active low */
+#define	BITBANG_CS_INACTIVE	0
 
+	/* txrx_bufs() may handle dma mapping for transfers that don't
+	 * already have one (transfer.{tx,rx}_dma is zero), or use PIO
+	 */
 	int	(*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
+
+	/* txrx_word[SPI_MODE_*]() just looks like a shift register */
 	u32	(*txrx_word[4])(struct spi_device *spi,
 			unsigned nsecs,
 			u32 word, u8 bits);
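The BITBANG_CS_* codes give chipselect() implementations a fixed vocabulary
regardless of the line's polarity. A minimal board-side sketch, assuming an
active-low nCS wired to a GPIO and a set_gpio_value()-style helper (both the
helper and MYBOARD_nCS_PIN are hypothetical; real boards use their platform's
own GPIO calls):

        static void myboard_chipselect(struct spi_device *spi, int is_on)
        {
                /* nCS is active low: drive it low to select the chip */
                if (is_on == BITBANG_CS_ACTIVE)
                        set_gpio_value(MYBOARD_nCS_PIN, 0);
                else
                        set_gpio_value(MYBOARD_nCS_PIN, 1);
        }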