Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

USB: imx21-hcd accept arbitrary transfer buffer alignment.

The hardware can only do DMA to 4-byte-aligned addresses.
When this requirement is not met, use PIO or a bounce buffer.

PIO is used when the buffer is small enough to be placed
directly in the hardware data memory (2 * maxpacket).

A bounce buffer is used for larger transfers.

Signed-off-by: Martin Fuzzey <mfuzzey@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
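
In outline, the patch makes a three-way choice each time an ETD is activated: an aligned buffer is handed to DMA as before, a misaligned buffer that fits in the ETD's data memory is transferred by PIO, and a larger misaligned buffer goes through a bounce buffer. Below is a minimal user-space sketch of that decision, borrowing names from the diff further down; pick_strategy() itself and the sample values are illustrative only, not part of the driver.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors unsuitable_for_dma() from the patch: the i.MX21 ETD DMA
 * engine can only address 4-byte-aligned buffers. */
static bool unsuitable_for_dma(uintptr_t addr)
{
    return (addr & 3) != 0;
}

enum xfer_strategy { USE_DMA, USE_PIO, USE_BOUNCE };

/* pick_strategy() is a hypothetical helper; in the driver the same
 * decision is made inline in activate_etd().  dmem_size is the
 * per-ETD data memory, i.e. 2 * maxpacket. */
static enum xfer_strategy pick_strategy(uintptr_t dma_addr,
                                        size_t len, size_t dmem_size)
{
    if (!unsuitable_for_dma(dma_addr))
        return USE_DMA;    /* aligned: DMA straight from the URB buffer */
    if (len <= dmem_size)
        return USE_PIO;    /* small: copy through the ETD data memory */
    return USE_BOUNCE;     /* large: kmalloc() bounce + dma_map_single() */
}

int main(void)
{
    printf("%d\n", pick_strategy(0x1000, 512, 128)); /* 0: USE_DMA */
    printf("%d\n", pick_strategy(0x1001,  64, 128)); /* 1: USE_PIO */
    printf("%d\n", pick_strategy(0x1001, 512, 128)); /* 2: USE_BOUNCE */
    return 0;
}

In the driver this choice happens in activate_etd(), using the etd->len <= etd->dmem_size test visible in the diff below.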


Authored by Martin Fuzzey, committed by Greg Kroah-Hartman
d0cc3d41 1dae423d

+169 -35 total

drivers/usb/host/imx21-hcd.c (+163 -33)
···
 #include <linux/slab.h>
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
+#include <linux/dma-mapping.h>
 
 #include "imx21-hcd.h"
 
···
     return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
 }
 
+static inline bool unsuitable_for_dma(dma_addr_t addr)
+{
+    return (addr & 3) != 0;
+}
 
 #include "imx21-dbg.c"
+
+static void nonisoc_urb_completed_for_etd(
+    struct imx21 *imx21, struct etd_priv *etd, int status);
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
+static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
 
 /* =========================================== */
 /* ETD management */
···
         etd_writel(imx21, num, i, 0);
     etd->urb = NULL;
     etd->ep = NULL;
-    etd->td = NULL;;
+    etd->td = NULL;
+    etd->bounce_buffer = NULL;
 }
 
 static void free_etd(struct imx21 *imx21, int num)
···
         ((u32) maxpacket << DW0_MAXPKTSIZ));
 }
 
-static void activate_etd(struct imx21 *imx21,
-    int etd_num, dma_addr_t dma, u8 dir)
+/**
+ * Copy buffer to data controller data memory.
+ * We cannot use memcpy_toio() because the hardware requires 32bit writes
+ */
+static void copy_to_dmem(
+    struct imx21 *imx21, int dmem_offset, void *src, int count)
+{
+    void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
+    u32 word = 0;
+    u8 *p = src;
+    int byte = 0;
+    int i;
+
+    for (i = 0; i < count; i++) {
+        byte = i % 4;
+        word += (*p++ << (byte * 8));
+        if (byte == 3) {
+            writel(word, dmem);
+            dmem += 4;
+            word = 0;
+        }
+    }
+
+    if (count && byte != 3)
+        writel(word, dmem);
+}
+
+static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
 {
     u32 etd_mask = 1 << etd_num;
     struct etd_priv *etd = &imx21->etd[etd_num];
+
+    if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
+        /* For non aligned isoc the condition below is always true */
+        if (etd->len <= etd->dmem_size) {
+            /* Fits into data memory, use PIO */
+            if (dir != TD_DIR_IN) {
+                copy_to_dmem(imx21,
+                    etd->dmem_offset,
+                    etd->cpu_buffer, etd->len);
+            }
+            etd->dma_handle = 0;
+
+        } else {
+            /* Too big for data memory, use bounce buffer */
+            enum dma_data_direction dmadir;
+
+            if (dir == TD_DIR_IN) {
+                dmadir = DMA_FROM_DEVICE;
+                etd->bounce_buffer = kmalloc(etd->len,
+                            GFP_ATOMIC);
+            } else {
+                dmadir = DMA_TO_DEVICE;
+                etd->bounce_buffer = kmemdup(etd->cpu_buffer,
+                            etd->len,
+                            GFP_ATOMIC);
+            }
+            if (!etd->bounce_buffer) {
+                dev_err(imx21->dev, "failed bounce alloc\n");
+                goto err_bounce_alloc;
+            }
+
+            etd->dma_handle =
+                dma_map_single(imx21->dev,
+                    etd->bounce_buffer,
+                    etd->len,
+                    dmadir);
+            if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
+                dev_err(imx21->dev, "failed bounce map\n");
+                goto err_bounce_map;
+            }
+        }
+    }
 
     clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
     set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
     clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
     clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
 
-    if (dma) {
+    if (etd->dma_handle) {
         set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
         clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
         clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
-        writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
+        writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
         set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
     } else {
         if (dir != TD_DIR_IN) {
-            /* need to set for ZLP */
+            /* need to set for ZLP and PIO */
             set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
             set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
         }
···
 
     etd->active_count = 1;
     writel(etd_mask, imx21->regs + USBH_ETDENSET);
+    return;
+
+err_bounce_map:
+    kfree(etd->bounce_buffer);
+
+err_bounce_alloc:
+    free_dmem(imx21, etd);
+    nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
 }
 
 /* =========================================== */
···
 
     etd->dmem_offset = dmem_offset;
     urb_priv->active = 1;
-    activate_etd(imx21, etd_num, etd->dma_handle, dir);
+    activate_etd(imx21, etd_num, dir);
 }
 
 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
···
 /* =========================================== */
 /* End handling */
 /* =========================================== */
-static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
 
 /* Endpoint now idle - release it's ETD(s) or asssign to queued request */
 static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
···
     ep_idle(imx21, ep_priv);
 }
 
+static void nonisoc_urb_completed_for_etd(
+    struct imx21 *imx21, struct etd_priv *etd, int status)
+{
+    struct usb_host_endpoint *ep = etd->ep;
+
+    urb_done(imx21->hcd, etd->urb, status);
+    etd->urb = NULL;
+
+    if (!list_empty(&ep->urb_list)) {
+        struct urb *urb = list_first_entry(
+            &ep->urb_list, struct urb, urb_list);
+
+        dev_vdbg(imx21->dev, "next URB %p\n", urb);
+        schedule_nonisoc_etd(imx21, urb);
+    }
+}
+
+
 /* =========================================== */
 /* ISOC Handling ... */
 /* =========================================== */
···
     etd->ep = td->ep;
     etd->urb = td->urb;
     etd->len = td->len;
+    etd->dma_handle = td->dma_handle;
+    etd->cpu_buffer = td->cpu_buffer;
 
     debug_isoc_submitted(imx21, cur_frame, td);
 
···
             (TD_NOTACCESSED << DW3_COMPCODE0) |
             (td->len << DW3_PKTLEN0));
 
-        activate_etd(imx21, etd_num, td->data, dir);
+        activate_etd(imx21, etd_num, dir);
     }
 }
 
-static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
 {
     struct imx21 *imx21 = hcd_to_imx21(hcd);
     int etd_mask = 1 << etd_num;
-    struct urb_priv *urb_priv = urb->hcpriv;
     struct etd_priv *etd = imx21->etd + etd_num;
+    struct urb *urb = etd->urb;
+    struct urb_priv *urb_priv = urb->hcpriv;
     struct td *td = etd->td;
     struct usb_host_endpoint *ep = etd->ep;
     int isoc_index = td->isoc_index;
···
             bytes_xfrd, td->len, urb, etd_num, isoc_index);
     }
 
-    if (dir_in)
+    if (dir_in) {
         clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+        if (!etd->dma_handle)
+            memcpy_fromio(etd->cpu_buffer,
+                imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+                bytes_xfrd);
+    }
 
     urb->actual_length += bytes_xfrd;
     urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
···
     /* set up transfers */
     td = urb_priv->isoc_td;
     for (i = 0; i < urb->number_of_packets; i++, td++) {
+        unsigned int offset = urb->iso_frame_desc[i].offset;
         td->ep = ep;
         td->urb = urb;
         td->len = urb->iso_frame_desc[i].length;
         td->isoc_index = i;
         td->frame = wrap_frame(urb->start_frame + urb->interval * i);
-        td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
+        td->dma_handle = urb->transfer_dma + offset;
+        td->cpu_buffer = urb->transfer_buffer + offset;
         list_add_tail(&td->list, &ep_priv->td_list);
     }
 
···
     if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
         if (state == US_CTRL_SETUP) {
             dir = TD_DIR_SETUP;
+            if (unsuitable_for_dma(urb->setup_dma))
+                unmap_urb_setup_for_dma(imx21->hcd, urb);
             etd->dma_handle = urb->setup_dma;
+            etd->cpu_buffer = urb->setup_packet;
             bufround = 0;
             count = 8;
             datatoggle = TD_TOGGLE_DATA0;
         } else {    /* US_CTRL_ACK */
             dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
-            etd->dma_handle = urb->transfer_dma;
             bufround = 0;
             count = 0;
             datatoggle = TD_TOGGLE_DATA1;
···
     } else {
         dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
         bufround = (dir == TD_DIR_IN) ? 1 : 0;
+        if (unsuitable_for_dma(urb->transfer_dma))
+            unmap_urb_for_dma(imx21->hcd, urb);
+
         etd->dma_handle = urb->transfer_dma;
+        etd->cpu_buffer = urb->transfer_buffer;
         if (usb_pipebulk(pipe) && (state == US_BULK0))
             count = 0;
         else
···
     /* enable the ETD to kick off transfer */
     dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
         etd_num, count, dir != TD_DIR_IN ? "out" : "in");
-    activate_etd(imx21, etd_num, etd->dma_handle, dir);
+    activate_etd(imx21, etd_num, dir);
 
 }
 
-static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
 {
     struct imx21 *imx21 = hcd_to_imx21(hcd);
     struct etd_priv *etd = &imx21->etd[etd_num];
+    struct urb *urb = etd->urb;
     u32 etd_mask = 1 << etd_num;
     struct urb_priv *urb_priv = urb->hcpriv;
     int dir;
···
     if (dir == TD_DIR_IN) {
         clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
         clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+        if (etd->bounce_buffer) {
+            memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
+            dma_unmap_single(imx21->dev,
+                etd->dma_handle, etd->len, DMA_FROM_DEVICE);
+        } else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
+            memcpy_fromio(etd->cpu_buffer,
+                imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+                bytes_xfrd);
+        }
     }
+
+    kfree(etd->bounce_buffer);
+    etd->bounce_buffer = NULL;
     free_dmem(imx21, etd);
 
     urb->error_count = 0;
···
         break;
     }
 
-    if (!etd_done) {
+    if (etd_done)
+        nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
+    else {
         dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
         schedule_nonisoc_etd(imx21, urb);
-    } else {
-        struct usb_host_endpoint *ep = urb->ep;
-
-        urb_done(hcd, urb, cc_to_error[cc]);
-        etd->urb = NULL;
-
-        if (!list_empty(&ep->urb_list)) {
-            urb = list_first_entry(&ep->urb_list,
-                struct urb, urb_list);
-            dev_vdbg(imx21->dev, "next URB %p\n", urb);
-            schedule_nonisoc_etd(imx21, urb);
-        }
     }
 }
+
 
 static struct ep_priv *alloc_ep(void)
···
     } else if (urb_priv->active) {
         int etd_num = ep_priv->etd[0];
         if (etd_num != -1) {
+            struct etd_priv *etd = &imx21->etd[etd_num];
+
             disactivate_etd(imx21, etd_num);
-            free_dmem(imx21, &imx21->etd[etd_num]);
-            imx21->etd[etd_num].urb = NULL;
+            free_dmem(imx21, etd);
+            etd->urb = NULL;
+            kfree(etd->bounce_buffer);
+            etd->bounce_buffer = NULL;
         }
     }
 
···
     }
 
     if (usb_pipeisoc(etd->urb->pipe))
-        isoc_etd_done(hcd, etd->urb, etd_num);
+        isoc_etd_done(hcd, etd_num);
     else
-        nonisoc_etd_done(hcd, etd->urb, etd_num);
+        nonisoc_etd_done(hcd, etd_num);
 }
 
 /* only enable SOF interrupt if it may be needed for the kludge */
···
     }
 
     imx21 = hcd_to_imx21(hcd);
+    imx21->hcd = hcd;
     imx21->dev = &pdev->dev;
     imx21->pdata = pdev->dev.platform_data;
     if (!imx21->pdata)
drivers/usb/host/imx21-hcd.h (+6 -2)
···
 #define USBCTRL_USB_BYP (1 << 2)
 #define USBCTRL_HOST1_TXEN_OE (1 << 1)
 
+#define USBOTG_DMEM 0x1000
 
 /* Values in TD blocks */
 #define TD_DIR_SETUP 0
···
     struct list_head list;
     struct urb *urb;
     struct usb_host_endpoint *ep;
-    dma_addr_t data;
-    unsigned long buf_addr;
+    dma_addr_t dma_handle;
+    void *cpu_buffer;
     int len;
     int frame;
     int isoc_index;
···
     struct td *td;
     struct list_head queue;
     dma_addr_t dma_handle;
+    void *cpu_buffer;
+    void *bounce_buffer;
     int alloc;
     int len;
     int dmem_size;
···
 struct imx21 {
     spinlock_t lock;
     struct device *dev;
+    struct usb_hcd *hcd;
     struct mx21_usbh_platform_data *pdata;
     struct list_head dmem_list;
     struct list_head queue_for_etd; /* eps queued due to etd shortage */