Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge master.kernel.org:/pub/scm/linux/kernel/git/gregkh/usb-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/gregkh/usb-2.6: (44 commits)
USB: drivers/usb/storage/dpcm.c whitespace cleanup
USB: r8a66597-hcd: fixes some problem
USB: change name of spinlock in hcd.c
USB: move routines in hcd.c
USB: misc: uss720: clean up urb->status usage
USB: misc: usbtest: clean up urb->status usage
USB: misc: usblcd: clean up urb->status usage
USB: misc: phidgetmotorcontrol: clean up urb->status usage
USB: misc: phidgetkit: clean up urb->status usage
USB: misc: legousbtower: clean up urb->status usage
USB: misc: ldusb: clean up urb->status usage
USB: misc: iowarrior: clean up urb->status usage
USB: misc: ftdi-elan: clean up urb->status usage
USB: misc: auerswald: clean up urb->status usage
USB: misc: appledisplay: clean up urb->status usage
USB: misc: adtux: clean up urb->status usage
USB: core: message: clean up urb->status usage
USB: image: microtek: clean up urb->status usage
USB: image: mdc800: clean up urb->status usage
USB: storage: onetouch: clean up urb->status usage
...

+5402 -1131
+6
MAINTAINERS
··· 329 329 M: ink@jurassic.park.msu.ru 330 330 S: Maintained for 2.4; PCI support for 2.6. 331 331 332 + AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER 333 + P: Thomas Dahlmann 334 + M: thomas.dahlmann@amd.com 335 + L: info-linux@geode.amd.com 336 + S: Supported 337 + 332 338 AMD GEODE PROCESSOR/CHIPSET SUPPORT 333 339 P: Jordan Crouse 334 340 M: info-linux@geode.amd.com
+1 -2
drivers/usb/atm/cxacru.c
··· 456 456 int* actual_length) 457 457 { 458 458 struct timer_list timer; 459 - int status; 459 + int status = urb->status; 460 460 461 461 init_timer(&timer); 462 462 timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT); ··· 464 464 timer.function = cxacru_timeout_kill; 465 465 add_timer(&timer); 466 466 wait_for_completion(done); 467 - status = urb->status; 468 467 del_timer_sync(&timer); 469 468 470 469 if (actual_length)
+4 -3
drivers/usb/atm/speedtch.c
··· 612 612 struct speedtch_instance_data *instance = int_urb->context; 613 613 struct usbatm_data *usbatm = instance->usbatm; 614 614 unsigned int count = int_urb->actual_length; 615 - int ret = int_urb->status; 615 + int status = int_urb->status; 616 + int ret; 616 617 617 618 /* The magic interrupt for "up state" */ 618 619 static const unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 }; ··· 622 621 623 622 atm_dbg(usbatm, "%s entered\n", __func__); 624 623 625 - if (ret < 0) { 626 - atm_dbg(usbatm, "%s: nonzero urb status %d!\n", __func__, ret); 624 + if (status < 0) { 625 + atm_dbg(usbatm, "%s: nonzero urb status %d!\n", __func__, status); 627 626 goto fail; 628 627 } 629 628
+4 -2
drivers/usb/atm/ueagle-atm.c
··· 1308 1308 { 1309 1309 struct uea_softc *sc = urb->context; 1310 1310 struct intr_pkt *intr = urb->transfer_buffer; 1311 + int status = urb->status; 1312 + 1311 1313 uea_enters(INS_TO_USBDEV(sc)); 1312 1314 1313 - if (unlikely(urb->status < 0)) { 1315 + if (unlikely(status < 0)) { 1314 1316 uea_err(INS_TO_USBDEV(sc), "uea_intr() failed with %d\n", 1315 - urb->status); 1317 + status); 1316 1318 return; 1317 1319 } 1318 1320
+6 -5
drivers/usb/atm/usbatm.c
··· 257 257 { 258 258 struct usbatm_channel *channel = urb->context; 259 259 unsigned long flags; 260 + int status = urb->status; 260 261 261 262 vdbg("%s: urb 0x%p, status %d, actual_length %d", 262 - __func__, urb, urb->status, urb->actual_length); 263 + __func__, urb, status, urb->actual_length); 263 264 264 265 /* usually in_interrupt(), but not always */ 265 266 spin_lock_irqsave(&channel->lock, flags); ··· 270 269 271 270 spin_unlock_irqrestore(&channel->lock, flags); 272 271 273 - if (unlikely(urb->status) && 272 + if (unlikely(status) && 274 273 (!(channel->usbatm->flags & UDSL_IGNORE_EILSEQ) || 275 - urb->status != -EILSEQ )) 274 + status != -EILSEQ )) 276 275 { 277 - if (urb->status == -ESHUTDOWN) 276 + if (status == -ESHUTDOWN) 278 277 return; 279 278 280 279 if (printk_ratelimit()) 281 280 atm_warn(channel->usbatm, "%s: urb 0x%p failed (%d)!\n", 282 - __func__, urb, urb->status); 281 + __func__, urb, status); 283 282 /* throttle processing in case of an error */ 284 283 mod_timer(&channel->delay, jiffies + msecs_to_jiffies(THROTTLE_MSECS)); 285 284 } else
+10 -8
drivers/usb/class/cdc-acm.c
··· 257 257 struct usb_cdc_notification *dr = urb->transfer_buffer; 258 258 unsigned char *data; 259 259 int newctrl; 260 - int status; 260 + int retval; 261 + int status = urb->status; 261 262 262 - switch (urb->status) { 263 + switch (status) { 263 264 case 0: 264 265 /* success */ 265 266 break; ··· 268 267 case -ENOENT: 269 268 case -ESHUTDOWN: 270 269 /* this urb is terminated, clean up */ 271 - dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status); 270 + dbg("%s - urb shutting down with status: %d", __FUNCTION__, status); 272 271 return; 273 272 default: 274 - dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status); 273 + dbg("%s - nonzero urb status received: %d", __FUNCTION__, status); 275 274 goto exit; 276 275 } 277 276 ··· 312 311 break; 313 312 } 314 313 exit: 315 - status = usb_submit_urb (urb, GFP_ATOMIC); 316 - if (status) 314 + retval = usb_submit_urb (urb, GFP_ATOMIC); 315 + if (retval) 317 316 err ("%s - usb_submit_urb failed with result %d", 318 - __FUNCTION__, status); 317 + __FUNCTION__, retval); 319 318 } 320 319 321 320 /* data interface returns incoming bytes, or we got unthrottled */ ··· 325 324 struct acm_ru *rcv = urb->context; 326 325 struct acm *acm = rcv->instance; 327 326 int status = urb->status; 328 - dbg("Entering acm_read_bulk with status %d", urb->status); 327 + 328 + dbg("Entering acm_read_bulk with status %d", status); 329 329 330 330 if (!ACM_READY(acm)) 331 331 return;
+15 -12
drivers/usb/class/usblp.c
··· 289 289 static void usblp_bulk_read(struct urb *urb) 290 290 { 291 291 struct usblp *usblp = urb->context; 292 + int status = urb->status; 292 293 293 294 if (usblp->present && usblp->used) { 294 - if (urb->status) 295 + if (status) 295 296 printk(KERN_WARNING "usblp%d: " 296 297 "nonzero read bulk status received: %d\n", 297 - usblp->minor, urb->status); 298 + usblp->minor, status); 298 299 } 299 300 spin_lock(&usblp->lock); 300 - if (urb->status < 0) 301 - usblp->rstatus = urb->status; 301 + if (status < 0) 302 + usblp->rstatus = status; 302 303 else 303 304 usblp->rstatus = urb->actual_length; 304 305 usblp->rcomplete = 1; ··· 312 311 static void usblp_bulk_write(struct urb *urb) 313 312 { 314 313 struct usblp *usblp = urb->context; 314 + int status = urb->status; 315 315 316 316 if (usblp->present && usblp->used) { 317 - if (urb->status) 317 + if (status) 318 318 printk(KERN_WARNING "usblp%d: " 319 319 "nonzero write bulk status received: %d\n", 320 - usblp->minor, urb->status); 320 + usblp->minor, status); 321 321 } 322 322 spin_lock(&usblp->lock); 323 - if (urb->status < 0) 324 - usblp->wstatus = urb->status; 323 + if (status < 0) 324 + usblp->wstatus = status; 325 325 else 326 326 usblp->wstatus = urb->actual_length; 327 327 usblp->wcomplete = 1; ··· 743 741 */ 744 742 rv = usblp_wwait(usblp, !!(file->f_flags&O_NONBLOCK)); 745 743 if (rv < 0) { 746 - /* 747 - * If interrupted, we simply leave the URB to dangle, 748 - * so the ->release will call usb_kill_urb(). 749 - */ 744 + if (rv == -EAGAIN) { 745 + /* Presume that it's going to complete well. */ 746 + writecount += transfer_length; 747 + } 748 + /* Leave URB dangling, to be cleaned on close. */ 750 749 goto collect_error; 751 750 } 752 751
+67 -64
drivers/usb/core/hcd.c
··· 99 99 /* used for controlling access to virtual root hubs */ 100 100 static DEFINE_SPINLOCK(hcd_root_hub_lock); 101 101 102 - /* used when updating hcd data */ 103 - static DEFINE_SPINLOCK(hcd_data_lock); 102 + /* used when updating an endpoint's URB list */ 103 + static DEFINE_SPINLOCK(hcd_urb_list_lock); 104 104 105 105 /* wait queue for synchronous unlinks */ 106 106 DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue); 107 + 108 + static inline int is_root_hub(struct usb_device *udev) 109 + { 110 + return (udev->parent == NULL); 111 + } 107 112 108 113 /*-------------------------------------------------------------------------*/ 109 114 ··· 911 906 static void urb_unlink(struct usb_hcd *hcd, struct urb *urb) 912 907 { 913 908 unsigned long flags; 914 - int at_root_hub = (urb->dev == hcd->self.root_hub); 915 909 916 910 /* clear all state linking urb to this dev (and hcd) */ 917 - spin_lock_irqsave (&hcd_data_lock, flags); 911 + spin_lock_irqsave(&hcd_urb_list_lock, flags); 918 912 list_del_init (&urb->urb_list); 919 - spin_unlock_irqrestore (&hcd_data_lock, flags); 913 + spin_unlock_irqrestore(&hcd_urb_list_lock, flags); 920 914 921 - if (hcd->self.uses_dma && !at_root_hub) { 915 + if (hcd->self.uses_dma && !is_root_hub(urb->dev)) { 922 916 if (usb_pipecontrol (urb->pipe) 923 917 && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) 924 918 dma_unmap_single (hcd->self.controller, urb->setup_dma, ··· 959 955 960 956 // FIXME: verify that quiescing hc works right (RH cleans up) 961 957 962 - spin_lock_irqsave (&hcd_data_lock, flags); 958 + spin_lock_irqsave(&hcd_urb_list_lock, flags); 963 959 ep = (usb_pipein(urb->pipe) ? 
urb->dev->ep_in : urb->dev->ep_out) 964 960 [usb_pipeendpoint(urb->pipe)]; 965 961 if (unlikely (!ep)) ··· 976 972 status = -ESHUTDOWN; 977 973 break; 978 974 } 979 - spin_unlock_irqrestore (&hcd_data_lock, flags); 975 + spin_unlock_irqrestore(&hcd_urb_list_lock, flags); 980 976 if (status) { 981 977 INIT_LIST_HEAD (&urb->urb_list); 982 978 usbmon_urb_submit_error(&hcd->self, urb, status); ··· 990 986 urb = usb_get_urb (urb); 991 987 atomic_inc (&urb->use_count); 992 988 993 - if (urb->dev == hcd->self.root_hub) { 989 + if (is_root_hub(urb->dev)) { 994 990 /* NOTE: requirement on hub callers (usbfs and the hub 995 991 * driver, for now) that URBs' urb->transfer_buffer be 996 992 * valid and usb_buffer_{sync,unmap}() not be needed, since ··· 1037 1033 1038 1034 /*-------------------------------------------------------------------------*/ 1039 1035 1040 - /* called in any context */ 1041 - int usb_hcd_get_frame_number (struct usb_device *udev) 1042 - { 1043 - struct usb_hcd *hcd = bus_to_hcd(udev->bus); 1044 - 1045 - if (!HC_IS_RUNNING (hcd->state)) 1046 - return -ESHUTDOWN; 1047 - return hcd->driver->get_frame_number (hcd); 1048 - } 1049 - 1050 - /*-------------------------------------------------------------------------*/ 1051 - 1052 1036 /* this makes the hcd giveback() the urb more quickly, by kicking it 1053 1037 * off hardware queues (which may take a while) and returning it as 1054 1038 * soon as practical. we've already set up the urb's return status, ··· 1047 1055 { 1048 1056 int value; 1049 1057 1050 - if (urb->dev == hcd->self.root_hub) 1058 + if (is_root_hub(urb->dev)) 1051 1059 value = usb_rh_urb_dequeue (hcd, urb); 1052 1060 else { 1053 1061 ··· 1095 1103 * that it was submitted. But as a rule it can't know whether or 1096 1104 * not it's already been unlinked ... 
so we respect the reversed 1097 1105 * lock sequence needed for the usb_hcd_giveback_urb() code paths 1098 - * (urb lock, then hcd_data_lock) in case some other CPU is now 1106 + * (urb lock, then hcd_urb_list_lock) in case some other CPU is now 1099 1107 * unlinking it. 1100 1108 */ 1101 1109 spin_lock_irqsave (&urb->lock, flags); 1102 - spin_lock (&hcd_data_lock); 1110 + spin_lock(&hcd_urb_list_lock); 1103 1111 1104 1112 sys = &urb->dev->dev; 1105 1113 hcd = bus_to_hcd(urb->dev->bus); ··· 1131 1139 * finish unlinking the initial failed usb_set_address() 1132 1140 * or device descriptor fetch. 1133 1141 */ 1134 - if (!test_bit(HCD_FLAG_SAW_IRQ, &hcd->flags) 1135 - && hcd->self.root_hub != urb->dev) { 1142 + if (!test_bit(HCD_FLAG_SAW_IRQ, &hcd->flags) && 1143 + !is_root_hub(urb->dev)) { 1136 1144 dev_warn (hcd->self.controller, "Unlink after no-IRQ? " 1137 - "Controller is probably using the wrong IRQ." 1138 - "\n"); 1145 + "Controller is probably using the wrong IRQ.\n"); 1139 1146 set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); 1140 1147 } 1141 1148 1142 1149 urb->status = status; 1143 1150 1144 - spin_unlock (&hcd_data_lock); 1151 + spin_unlock(&hcd_urb_list_lock); 1145 1152 spin_unlock_irqrestore (&urb->lock, flags); 1146 1153 1147 1154 retval = unlink1 (hcd, urb); ··· 1149 1158 return retval; 1150 1159 1151 1160 done: 1152 - spin_unlock (&hcd_data_lock); 1161 + spin_unlock(&hcd_urb_list_lock); 1153 1162 spin_unlock_irqrestore (&urb->lock, flags); 1154 1163 if (retval != -EIDRM && sys && sys->driver) 1155 1164 dev_dbg (sys, "hcd_unlink_urb %p fail %d\n", urb, retval); 1156 1165 return retval; 1157 1166 } 1167 + 1168 + /*-------------------------------------------------------------------------*/ 1169 + 1170 + /** 1171 + * usb_hcd_giveback_urb - return URB from HCD to device driver 1172 + * @hcd: host controller returning the URB 1173 + * @urb: urb being returned to the USB device driver. 
1174 + * Context: in_interrupt() 1175 + * 1176 + * This hands the URB from HCD to its USB device driver, using its 1177 + * completion function. The HCD has freed all per-urb resources 1178 + * (and is done using urb->hcpriv). It also released all HCD locks; 1179 + * the device driver won't cause problems if it frees, modifies, 1180 + * or resubmits this URB. 1181 + */ 1182 + void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb) 1183 + { 1184 + urb_unlink(hcd, urb); 1185 + usbmon_urb_complete (&hcd->self, urb); 1186 + usb_unanchor_urb(urb); 1187 + 1188 + /* pass ownership to the completion handler */ 1189 + urb->complete (urb); 1190 + atomic_dec (&urb->use_count); 1191 + if (unlikely (urb->reject)) 1192 + wake_up (&usb_kill_urb_queue); 1193 + usb_put_urb (urb); 1194 + } 1195 + EXPORT_SYMBOL (usb_hcd_giveback_urb); 1158 1196 1159 1197 /*-------------------------------------------------------------------------*/ 1160 1198 ··· 1206 1186 1207 1187 /* ep is already gone from udev->ep_{in,out}[]; no more submits */ 1208 1188 rescan: 1209 - spin_lock (&hcd_data_lock); 1189 + spin_lock(&hcd_urb_list_lock); 1210 1190 list_for_each_entry (urb, &ep->urb_list, urb_list) { 1211 1191 int tmp; 1212 1192 ··· 1214 1194 if (urb->status != -EINPROGRESS) 1215 1195 continue; 1216 1196 usb_get_urb (urb); 1217 - spin_unlock (&hcd_data_lock); 1197 + spin_unlock(&hcd_urb_list_lock); 1218 1198 1219 1199 spin_lock (&urb->lock); 1220 1200 tmp = urb->status; ··· 1243 1223 /* list contents may have changed */ 1244 1224 goto rescan; 1245 1225 } 1246 - spin_unlock (&hcd_data_lock); 1226 + spin_unlock(&hcd_urb_list_lock); 1247 1227 local_irq_enable (); 1248 1228 1249 1229 /* synchronize with the hardware, so old configuration state ··· 1260 1240 * endpoint_disable methods. 
1261 1241 */ 1262 1242 while (!list_empty (&ep->urb_list)) { 1263 - spin_lock_irq (&hcd_data_lock); 1243 + spin_lock_irq(&hcd_urb_list_lock); 1264 1244 1265 1245 /* The list may have changed while we acquired the spinlock */ 1266 1246 urb = NULL; ··· 1269 1249 urb_list); 1270 1250 usb_get_urb (urb); 1271 1251 } 1272 - spin_unlock_irq (&hcd_data_lock); 1252 + spin_unlock_irq(&hcd_urb_list_lock); 1273 1253 1274 1254 if (urb) { 1275 1255 usb_kill_urb (urb); 1276 1256 usb_put_urb (urb); 1277 1257 } 1278 1258 } 1259 + } 1260 + 1261 + /*-------------------------------------------------------------------------*/ 1262 + 1263 + /* called in any context */ 1264 + int usb_hcd_get_frame_number (struct usb_device *udev) 1265 + { 1266 + struct usb_hcd *hcd = bus_to_hcd(udev->bus); 1267 + 1268 + if (!HC_IS_RUNNING (hcd->state)) 1269 + return -ESHUTDOWN; 1270 + return hcd->driver->get_frame_number (hcd); 1279 1271 } 1280 1272 1281 1273 /*-------------------------------------------------------------------------*/ ··· 1423 1391 EXPORT_SYMBOL (usb_bus_start_enum); 1424 1392 1425 1393 #endif 1426 - 1427 - /*-------------------------------------------------------------------------*/ 1428 - 1429 - /** 1430 - * usb_hcd_giveback_urb - return URB from HCD to device driver 1431 - * @hcd: host controller returning the URB 1432 - * @urb: urb being returned to the USB device driver. 1433 - * Context: in_interrupt() 1434 - * 1435 - * This hands the URB from HCD to its USB device driver, using its 1436 - * completion function. The HCD has freed all per-urb resources 1437 - * (and is done using urb->hcpriv). It also released all HCD locks; 1438 - * the device driver won't cause problems if it frees, modifies, 1439 - * or resubmits this URB. 
1440 - */ 1441 - void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb) 1442 - { 1443 - urb_unlink(hcd, urb); 1444 - usbmon_urb_complete (&hcd->self, urb); 1445 - usb_unanchor_urb(urb); 1446 - 1447 - /* pass ownership to the completion handler */ 1448 - urb->complete (urb); 1449 - atomic_dec (&urb->use_count); 1450 - if (unlikely (urb->reject)) 1451 - wake_up (&usb_kill_urb_queue); 1452 - usb_put_urb (urb); 1453 - } 1454 - EXPORT_SYMBOL (usb_hcd_giveback_urb); 1455 1394 1456 1395 /*-------------------------------------------------------------------------*/ 1457 1396
+6 -4
drivers/usb/core/hub.c
··· 1335 1335 udev->dev.devt = MKDEV(USB_DEVICE_MAJOR, 1336 1336 (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); 1337 1337 1338 + /* Increment the parent's count of unsuspended children */ 1339 + if (udev->parent) 1340 + usb_autoresume_device(udev->parent); 1341 + 1338 1342 /* Register the device. The device driver is responsible 1339 1343 * for adding the device files to sysfs and for configuring 1340 1344 * the device. ··· 1346 1342 err = device_add(&udev->dev); 1347 1343 if (err) { 1348 1344 dev_err(&udev->dev, "can't device_add, error %d\n", err); 1345 + if (udev->parent) 1346 + usb_autosuspend_device(udev->parent); 1349 1347 goto fail; 1350 1348 } 1351 - 1352 - /* Increment the parent's count of unsuspended children */ 1353 - if (udev->parent) 1354 - usb_autoresume_device(udev->parent); 1355 1349 1356 1350 exit: 1357 1351 return err;
+18 -16
drivers/usb/core/message.c
··· 34 34 { 35 35 struct completion done; 36 36 unsigned long expire; 37 - int status; 37 + int retval; 38 + int status = urb->status; 38 39 39 40 init_completion(&done); 40 41 urb->context = &done; 41 42 urb->actual_length = 0; 42 - status = usb_submit_urb(urb, GFP_NOIO); 43 - if (unlikely(status)) 43 + retval = usb_submit_urb(urb, GFP_NOIO); 44 + if (unlikely(retval)) 44 45 goto out; 45 46 46 47 expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; ··· 56 55 urb->transfer_buffer_length); 57 56 58 57 usb_kill_urb(urb); 59 - status = urb->status == -ENOENT ? -ETIMEDOUT : urb->status; 58 + retval = status == -ENOENT ? -ETIMEDOUT : status; 60 59 } else 61 - status = urb->status; 60 + retval = status; 62 61 out: 63 62 if (actual_length) 64 63 *actual_length = urb->actual_length; 65 64 66 65 usb_free_urb(urb); 67 - return status; 66 + return retval; 68 67 } 69 68 70 69 /*-------------------------------------------------------------------*/ ··· 251 250 static void sg_complete (struct urb *urb) 252 251 { 253 252 struct usb_sg_request *io = urb->context; 253 + int status = urb->status; 254 254 255 255 spin_lock (&io->lock); 256 256 ··· 267 265 */ 268 266 if (io->status 269 267 && (io->status != -ECONNRESET 270 - || urb->status != -ECONNRESET) 268 + || status != -ECONNRESET) 271 269 && urb->actual_length) { 272 270 dev_err (io->dev->bus->controller, 273 271 "dev %s ep%d%s scatterlist error %d/%d\n", 274 272 io->dev->devpath, 275 273 usb_pipeendpoint (urb->pipe), 276 274 usb_pipein (urb->pipe) ? "in" : "out", 277 - urb->status, io->status); 275 + status, io->status); 278 276 // BUG (); 279 277 } 280 278 281 - if (io->status == 0 && urb->status && urb->status != -ECONNRESET) { 282 - int i, found, status; 279 + if (io->status == 0 && status && status != -ECONNRESET) { 280 + int i, found, retval; 283 281 284 - io->status = urb->status; 282 + io->status = status; 285 283 286 284 /* the previous urbs, and this one, completed already. 
287 285 * unlink pending urbs so they won't rx/tx bad data. ··· 292 290 if (!io->urbs [i] || !io->urbs [i]->dev) 293 291 continue; 294 292 if (found) { 295 - status = usb_unlink_urb (io->urbs [i]); 296 - if (status != -EINPROGRESS 297 - && status != -ENODEV 298 - && status != -EBUSY) 293 + retval = usb_unlink_urb (io->urbs [i]); 294 + if (retval != -EINPROGRESS && 295 + retval != -ENODEV && 296 + retval != -EBUSY) 299 297 dev_err (&io->dev->dev, 300 298 "%s, unlink --> %d\n", 301 - __FUNCTION__, status); 299 + __FUNCTION__, retval); 302 300 } else if (urb == io->urbs [i]) 303 301 found = 1; 304 302 }
+53
drivers/usb/core/sysfs.c
··· 441 441 .attrs = dev_attrs, 442 442 }; 443 443 444 + /* Binary descriptors */ 445 + 446 + static ssize_t 447 + read_descriptors(struct kobject *kobj, struct bin_attribute *attr, 448 + char *buf, loff_t off, size_t count) 449 + { 450 + struct usb_device *udev = to_usb_device( 451 + container_of(kobj, struct device, kobj)); 452 + size_t nleft = count; 453 + size_t srclen, n; 454 + 455 + usb_lock_device(udev); 456 + 457 + /* The binary attribute begins with the device descriptor */ 458 + srclen = sizeof(struct usb_device_descriptor); 459 + if (off < srclen) { 460 + n = min_t(size_t, nleft, srclen - off); 461 + memcpy(buf, off + (char *) &udev->descriptor, n); 462 + nleft -= n; 463 + buf += n; 464 + off = 0; 465 + } else { 466 + off -= srclen; 467 + } 468 + 469 + /* Then follows the raw descriptor entry for the current 470 + * configuration (config plus subsidiary descriptors). 471 + */ 472 + if (udev->actconfig) { 473 + int cfgno = udev->actconfig - udev->config; 474 + 475 + srclen = __le16_to_cpu(udev->actconfig->desc.wTotalLength); 476 + if (off < srclen) { 477 + n = min_t(size_t, nleft, srclen - off); 478 + memcpy(buf, off + udev->rawdescriptors[cfgno], n); 479 + nleft -= n; 480 + } 481 + } 482 + usb_unlock_device(udev); 483 + return count - nleft; 484 + } 485 + 486 + static struct bin_attribute dev_bin_attr_descriptors = { 487 + .attr = {.name = "descriptors", .mode = 0444}, 488 + .read = read_descriptors, 489 + .size = 18 + 65535, /* dev descr + max-size raw descriptor */ 490 + }; 491 + 444 492 int usb_create_sysfs_dev_files(struct usb_device *udev) 445 493 { 446 494 struct device *dev = &udev->dev; ··· 497 449 retval = sysfs_create_group(&dev->kobj, &dev_attr_grp); 498 450 if (retval) 499 451 return retval; 452 + 453 + retval = device_create_bin_file(dev, &dev_bin_attr_descriptors); 454 + if (retval) 455 + goto error; 500 456 501 457 retval = add_persist_attributes(dev); 502 458 if (retval) ··· 544 492 device_remove_file(dev, &dev_attr_serial); 545 493 
remove_power_attributes(dev); 546 494 remove_persist_attributes(dev); 495 + device_remove_bin_file(dev, &dev_bin_attr_descriptors); 547 496 sysfs_remove_group(&dev->kobj, &dev_attr_grp); 548 497 } 549 498
+41 -39
drivers/usb/core/urb.c
··· 440 440 * @urb: pointer to urb describing a previously submitted request, 441 441 * may be NULL 442 442 * 443 - * This routine cancels an in-progress request. URBs complete only 444 - * once per submission, and may be canceled only once per submission. 445 - * Successful cancellation means the requests's completion handler will 446 - * be called with a status code indicating that the request has been 447 - * canceled (rather than any other code) and will quickly be removed 448 - * from host controller data structures. 443 + * This routine cancels an in-progress request. URBs complete only once 444 + * per submission, and may be canceled only once per submission. 445 + * Successful cancellation means termination of @urb will be expedited 446 + * and the completion handler will be called with a status code 447 + * indicating that the request has been canceled (rather than any other 448 + * code). 449 449 * 450 - * This request is always asynchronous. 451 - * Success is indicated by returning -EINPROGRESS, 452 - * at which time the URB will normally have been unlinked but not yet 453 - * given back to the device driver. When it is called, the completion 454 - * function will see urb->status == -ECONNRESET. Failure is indicated 455 - * by any other return value. Unlinking will fail when the URB is not 456 - * currently "linked" (i.e., it was never submitted, or it was unlinked 457 - * before, or the hardware is already finished with it), even if the 458 - * completion handler has not yet run. 450 + * This request is always asynchronous. Success is indicated by 451 + * returning -EINPROGRESS, at which time the URB will probably not yet 452 + * have been given back to the device driver. When it is eventually 453 + * called, the completion function will see @urb->status == -ECONNRESET. 454 + * Failure is indicated by usb_unlink_urb() returning any other value. 
455 + * Unlinking will fail when @urb is not currently "linked" (i.e., it was 456 + * never submitted, or it was unlinked before, or the hardware is already 457 + * finished with it), even if the completion handler has not yet run. 459 458 * 460 459 * Unlinking and Endpoint Queues: 460 + * 461 + * [The behaviors and guarantees described below do not apply to virtual 462 + * root hubs but only to endpoint queues for physical USB devices.] 461 463 * 462 464 * Host Controller Drivers (HCDs) place all the URBs for a particular 463 465 * endpoint in a queue. Normally the queue advances as the controller 464 466 * hardware processes each request. But when an URB terminates with an 465 - * error its queue stops, at least until that URB's completion routine 466 - * returns. It is guaranteed that the queue will not restart until all 467 - * its unlinked URBs have been fully retired, with their completion 468 - * routines run, even if that's not until some time after the original 469 - * completion handler returns. Normally the same behavior and guarantees 470 - * apply when an URB terminates because it was unlinked; however if an 471 - * URB is unlinked before the hardware has started to execute it, then 472 - * its queue is not guaranteed to stop until all the preceding URBs have 473 - * completed. 467 + * error its queue generally stops (see below), at least until that URB's 468 + * completion routine returns. It is guaranteed that a stopped queue 469 + * will not restart until all its unlinked URBs have been fully retired, 470 + * with their completion routines run, even if that's not until some time 471 + * after the original completion handler returns. The same behavior and 472 + * guarantee apply when an URB terminates because it was unlinked. 
474 473 * 475 - * This means that USB device drivers can safely build deep queues for 476 - * large or complex transfers, and clean them up reliably after any sort 477 - * of aborted transfer by unlinking all pending URBs at the first fault. 474 + * Bulk and interrupt endpoint queues are guaranteed to stop whenever an 475 + * URB terminates with any sort of error, including -ECONNRESET, -ENOENT, 476 + * and -EREMOTEIO. Control endpoint queues behave the same way except 477 + * that they are not guaranteed to stop for -EREMOTEIO errors. Queues 478 + * for isochronous endpoints are treated differently, because they must 479 + * advance at fixed rates. Such queues do not stop when an URB 480 + * encounters an error or is unlinked. An unlinked isochronous URB may 481 + * leave a gap in the stream of packets; it is undefined whether such 482 + * gaps can be filled in. 478 483 * 479 - * Note that an URB terminating early because a short packet was received 480 - * will count as an error if and only if the URB_SHORT_NOT_OK flag is set. 481 - * Also, that all unlinks performed in any URB completion handler must 482 - * be asynchronous. 484 + * Note that early termination of an URB because a short packet was 485 + * received will generate a -EREMOTEIO error if and only if the 486 + * URB_SHORT_NOT_OK flag is set. By setting this flag, USB device 487 + * drivers can build deep queues for large or complex bulk transfers 488 + * and clean them up reliably after any sort of aborted transfer by 489 + * unlinking all pending URBs at the first fault. 483 490 * 484 - * Queues for isochronous endpoints are treated differently, because they 485 - * advance at fixed rates. Such queues do not stop when an URB is unlinked. 486 - * An unlinked URB may leave a gap in the stream of packets. It is undefined 487 - * whether such gaps can be filled in. 
488 - * 489 - * When a control URB terminates with an error, it is likely that the 490 - * status stage of the transfer will not take place, even if it is merely 491 - * a soft error resulting from a short-packet with URB_SHORT_NOT_OK set. 491 + * When a control URB terminates with an error other than -EREMOTEIO, it 492 + * is quite likely that the status stage of the transfer will not take 493 + * place. 492 494 */ 493 495 int usb_unlink_urb(struct urb *urb) 494 496 {
+39 -18
drivers/usb/gadget/Kconfig
··· 82 82 Many controller drivers are platform-specific; these 83 83 often need board-specific hooks. 84 84 85 + config USB_GADGET_AMD5536UDC 86 + boolean "AMD5536 UDC" 87 + depends on PCI 88 + select USB_GADGET_DUALSPEED 89 + help 90 + The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge. 91 + It is a USB Highspeed DMA capable USB device controller. Beside ep0 92 + it provides 4 IN and 4 OUT endpoints (bulk or interrupt type). 93 + The UDC port supports OTG operation, and may be used as a host port 94 + if it's not being used to implement peripheral or OTG roles. 95 + 96 + Say "y" to link the driver statically, or "m" to build a 97 + dynamically linked module called "amd5536udc" and force all 98 + gadget drivers to also be dynamically linked. 99 + 100 + config USB_AMD5536UDC 101 + tristate 102 + depends on USB_GADGET_AMD5536UDC 103 + default USB_GADGET 104 + select USB_GADGET_SELECTED 105 + 85 106 config USB_GADGET_FSL_USB2 86 107 boolean "Freescale Highspeed USB DR Peripheral Controller" 87 108 depends on MPC834x || PPC_MPC831x ··· 176 155 default y if USB_ZERO 177 156 default y if USB_ETH 178 157 default y if USB_G_SERIAL 158 + 159 + config USB_GADGET_M66592 160 + boolean "Renesas M66592 USB Peripheral Controller" 161 + select USB_GADGET_DUALSPEED 162 + help 163 + M66592 is a discrete USB peripheral controller chip that 164 + supports both full and high speed USB 2.0 data transfers. 165 + It has seven configurable endpoints, and endpoint zero. 166 + 167 + Say "y" to link the driver statically, or "m" to build a 168 + dynamically linked module called "m66592_udc" and force all 169 + gadget drivers to also be dynamically linked. 
170 + 171 + config USB_M66592 172 + tristate 173 + depends on USB_GADGET_M66592 174 + default USB_GADGET 175 + select USB_GADGET_SELECTED 179 176 180 177 config USB_GADGET_GOKU 181 178 boolean "Toshiba TC86C001 'Goku-S'" ··· 299 260 tristate 300 261 depends on USB_GADGET_AT91 301 262 default USB_GADGET 302 - 303 - config USB_GADGET_M66592 304 - boolean "M66592 driver" 305 - select USB_GADGET_DUALSPEED 306 - help 307 - M66592 is a USB 2.0 peripheral controller. 308 - 309 - It has seven configurable endpoints, and endpoint zero. 310 - 311 - Say "y" to link the driver statically, or "m" to build a 312 - dynamically linked module called "m66592_udc" and force all 313 - gadget drivers to also be dynamically linked. 314 - 315 - config USB_M66592 316 - tristate 317 - depends on USB_GADGET_M66592 318 - default USB_GADGET 319 - select USB_GADGET_SELECTED 320 263 321 264 config USB_GADGET_DUMMY_HCD 322 265 boolean "Dummy HCD (DEVELOPMENT)"
+1
drivers/usb/gadget/Makefile
··· 7 7 8 8 obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o 9 9 obj-$(CONFIG_USB_NET2280) += net2280.o 10 + obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o 10 11 obj-$(CONFIG_USB_PXA2XX) += pxa2xx_udc.o 11 12 obj-$(CONFIG_USB_GOKU) += goku_udc.o 12 13 obj-$(CONFIG_USB_OMAP) += omap_udc.o
+3454
drivers/usb/gadget/amd5536udc.c
··· 1 + /* 2 + * amd5536.c -- AMD 5536 UDC high/full speed USB device controller 3 + * 4 + * Copyright (C) 2005-2007 AMD (http://www.amd.com) 5 + * Author: Thomas Dahlmann 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program; if not, write to the Free Software 19 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 + */ 21 + 22 + /* 23 + * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536. 24 + * It is a USB Highspeed DMA capable USB device controller. Beside ep0 it 25 + * provides 4 IN and 4 OUT endpoints (bulk or interrupt type). 26 + * 27 + * Make sure that UDC is assigned to port 4 by BIOS settings (port can also 28 + * be used as host port) and UOC bits PAD_EN and APU are set (should be done 29 + * by BIOS init). 30 + * 31 + * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not 32 + * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0") 33 + * can be used with gadget ether. 
34 + */ 35 + 36 + /* debug control */ 37 + /* #define UDC_VERBOSE */ 38 + 39 + /* Driver strings */ 40 + #define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller" 41 + #define UDC_DRIVER_VERSION_STRING "01.00.0206 - $Revision: #3 $" 42 + 43 + /* system */ 44 + #include <linux/module.h> 45 + #include <linux/pci.h> 46 + #include <linux/kernel.h> 47 + #include <linux/version.h> 48 + #include <linux/delay.h> 49 + #include <linux/ioport.h> 50 + #include <linux/sched.h> 51 + #include <linux/slab.h> 52 + #include <linux/smp_lock.h> 53 + #include <linux/errno.h> 54 + #include <linux/init.h> 55 + #include <linux/timer.h> 56 + #include <linux/list.h> 57 + #include <linux/interrupt.h> 58 + #include <linux/ioctl.h> 59 + #include <linux/fs.h> 60 + #include <linux/dmapool.h> 61 + #include <linux/moduleparam.h> 62 + #include <linux/device.h> 63 + #include <linux/io.h> 64 + #include <linux/irq.h> 65 + 66 + #include <asm/byteorder.h> 67 + #include <asm/system.h> 68 + #include <asm/unaligned.h> 69 + 70 + /* gadget stack */ 71 + #include <linux/usb/ch9.h> 72 + #include <linux/usb_gadget.h> 73 + 74 + /* udc specific */ 75 + #include "amd5536udc.h" 76 + 77 + 78 + static void udc_tasklet_disconnect(unsigned long); 79 + static void empty_req_queue(struct udc_ep *); 80 + static int udc_probe(struct udc *dev); 81 + static void udc_basic_init(struct udc *dev); 82 + static void udc_setup_endpoints(struct udc *dev); 83 + static void udc_soft_reset(struct udc *dev); 84 + static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep); 85 + static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq); 86 + static int udc_free_dma_chain(struct udc *dev, struct udc_request *req); 87 + static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req, 88 + unsigned long buf_len, gfp_t gfp_flags); 89 + static int udc_remote_wakeup(struct udc *dev); 90 + static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); 91 + static void 
udc_pci_remove(struct pci_dev *pdev); 92 + 93 + /* description */ 94 + static const char mod_desc[] = UDC_MOD_DESCRIPTION; 95 + static const char name[] = "amd5536udc"; 96 + 97 + /* structure to hold endpoint function pointers */ 98 + static const struct usb_ep_ops udc_ep_ops; 99 + 100 + /* received setup data */ 101 + static union udc_setup_data setup_data; 102 + 103 + /* pointer to device object */ 104 + static struct udc *udc; 105 + 106 + /* irq spin lock for soft reset */ 107 + static DEFINE_SPINLOCK(udc_irq_spinlock); 108 + /* stall spin lock */ 109 + static DEFINE_SPINLOCK(udc_stall_spinlock); 110 + 111 + /* 112 + * slave mode: pending bytes in rx fifo after nyet, 113 + * used if EPIN irq came but no req was available 114 + */ 115 + static unsigned int udc_rxfifo_pending; 116 + 117 + /* count soft resets after suspend to avoid loop */ 118 + static int soft_reset_occured; 119 + static int soft_reset_after_usbreset_occured; 120 + 121 + /* timer */ 122 + static struct timer_list udc_timer; 123 + static int stop_timer; 124 + 125 + /* set_rde -- Is used to control enabling of RX DMA. Problem is 126 + * that UDC has only one bit (RDE) to enable/disable RX DMA for 127 + * all OUT endpoints. So we have to handle race conditions like 128 + * when OUT data reaches the fifo but no request was queued yet. 129 + * This cannot be solved by letting the RX DMA disabled until a 130 + * request gets queued because there may be other OUT packets 131 + * in the FIFO (important for not blocking control traffic). 132 + * The value of set_rde controls the corresponding timer. 
133 + * 134 + * set_rde -1 == not used, means it is allowed to be set to 0 or 1 135 + * set_rde 0 == do not touch RDE, do not start the RDE timer 136 + * set_rde 1 == timer function will look whether FIFO has data 137 + * set_rde 2 == set by timer function to enable RX DMA on next call 138 + */ 139 + static int set_rde = -1; 140 + 141 + static DECLARE_COMPLETION(on_exit); 142 + static struct timer_list udc_pollstall_timer; 143 + static int stop_pollstall_timer; 144 + static DECLARE_COMPLETION(on_pollstall_exit); 145 + 146 + /* tasklet for usb disconnect */ 147 + static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect, 148 + (unsigned long) &udc); 149 + 150 + 151 + /* endpoint names used for print */ 152 + static const char ep0_string[] = "ep0in"; 153 + static const char *ep_string[] = { 154 + ep0_string, 155 + "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk", 156 + "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk", 157 + "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk", 158 + "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk", 159 + "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk", 160 + "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk", 161 + "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk" 162 + }; 163 + 164 + /* DMA usage flag */ 165 + static int use_dma = 1; 166 + /* packet per buffer dma */ 167 + static int use_dma_ppb = 1; 168 + /* with per descr. 
update */ 169 + static int use_dma_ppb_du; 170 + /* buffer fill mode */ 171 + static int use_dma_bufferfill_mode; 172 + /* full speed only mode */ 173 + static int use_fullspeed; 174 + /* tx buffer size for high speed */ 175 + static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE; 176 + 177 + /* module parameters */ 178 + module_param(use_dma, bool, S_IRUGO); 179 + MODULE_PARM_DESC(use_dma, "true for DMA"); 180 + module_param(use_dma_ppb, bool, S_IRUGO); 181 + MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode"); 182 + module_param(use_dma_ppb_du, bool, S_IRUGO); 183 + MODULE_PARM_DESC(use_dma_ppb_du, 184 + "true for DMA in packet per buffer mode with descriptor update"); 185 + module_param(use_fullspeed, bool, S_IRUGO); 186 + MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only"); 187 + 188 + /*---------------------------------------------------------------------------*/ 189 + /* Prints UDC device registers and endpoint irq registers */ 190 + static void print_regs(struct udc *dev) 191 + { 192 + DBG(dev, "------- Device registers -------\n"); 193 + DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg)); 194 + DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl)); 195 + DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts)); 196 + DBG(dev, "\n"); 197 + DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts)); 198 + DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk)); 199 + DBG(dev, "\n"); 200 + DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts)); 201 + DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk)); 202 + DBG(dev, "\n"); 203 + DBG(dev, "USE DMA = %d\n", use_dma); 204 + if (use_dma && use_dma_ppb && !use_dma_ppb_du) { 205 + DBG(dev, "DMA mode = PPBNDU (packet per buffer " 206 + "WITHOUT desc. update)\n"); 207 + dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU"); 208 + } else if (use_dma && use_dma_ppb_du) { 209 + DBG(dev, "DMA mode = PPBDU (packet per buffer " 210 + "WITH desc. 
update)\n"); 211 + dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU"); 212 + } 213 + if (use_dma && use_dma_bufferfill_mode) { 214 + DBG(dev, "DMA mode = BF (buffer fill mode)\n"); 215 + dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF"); 216 + } 217 + if (!use_dma) { 218 + dev_info(&dev->pdev->dev, "FIFO mode\n"); 219 + } 220 + DBG(dev, "-------------------------------------------------------\n"); 221 + } 222 + 223 + /* Masks unused interrupts */ 224 + static int udc_mask_unused_interrupts(struct udc *dev) 225 + { 226 + u32 tmp; 227 + 228 + /* mask all dev interrupts */ 229 + tmp = AMD_BIT(UDC_DEVINT_SVC) | 230 + AMD_BIT(UDC_DEVINT_ENUM) | 231 + AMD_BIT(UDC_DEVINT_US) | 232 + AMD_BIT(UDC_DEVINT_UR) | 233 + AMD_BIT(UDC_DEVINT_ES) | 234 + AMD_BIT(UDC_DEVINT_SI) | 235 + AMD_BIT(UDC_DEVINT_SOF)| 236 + AMD_BIT(UDC_DEVINT_SC); 237 + writel(tmp, &dev->regs->irqmsk); 238 + 239 + /* mask all ep interrupts */ 240 + writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk); 241 + 242 + return 0; 243 + } 244 + 245 + /* Enables endpoint 0 interrupts */ 246 + static int udc_enable_ep0_interrupts(struct udc *dev) 247 + { 248 + u32 tmp; 249 + 250 + DBG(dev, "udc_enable_ep0_interrupts()\n"); 251 + 252 + /* read irq mask */ 253 + tmp = readl(&dev->regs->ep_irqmsk); 254 + /* enable ep0 irq's */ 255 + tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0) 256 + & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0); 257 + writel(tmp, &dev->regs->ep_irqmsk); 258 + 259 + return 0; 260 + } 261 + 262 + /* Enables device interrupts for SET_INTF and SET_CONFIG */ 263 + static int udc_enable_dev_setup_interrupts(struct udc *dev) 264 + { 265 + u32 tmp; 266 + 267 + DBG(dev, "enable device interrupts for setup data\n"); 268 + 269 + /* read irq mask */ 270 + tmp = readl(&dev->regs->irqmsk); 271 + 272 + /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */ 273 + tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI) 274 + & AMD_UNMASK_BIT(UDC_DEVINT_SC) 275 + & AMD_UNMASK_BIT(UDC_DEVINT_UR) 276 + & AMD_UNMASK_BIT(UDC_DEVINT_SVC) 277 + & 
AMD_UNMASK_BIT(UDC_DEVINT_ENUM); 278 + writel(tmp, &dev->regs->irqmsk); 279 + 280 + return 0; 281 + } 282 + 283 + /* Calculates fifo start of endpoint based on preceeding endpoints */ 284 + static int udc_set_txfifo_addr(struct udc_ep *ep) 285 + { 286 + struct udc *dev; 287 + u32 tmp; 288 + int i; 289 + 290 + if (!ep || !(ep->in)) 291 + return -EINVAL; 292 + 293 + dev = ep->dev; 294 + ep->txfifo = dev->txfifo; 295 + 296 + /* traverse ep's */ 297 + for (i = 0; i < ep->num; i++) { 298 + if (dev->ep[i].regs) { 299 + /* read fifo size */ 300 + tmp = readl(&dev->ep[i].regs->bufin_framenum); 301 + tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE); 302 + ep->txfifo += tmp; 303 + } 304 + } 305 + return 0; 306 + } 307 + 308 + /* CNAK pending field: bit0 = ep0in, bit16 = ep0out */ 309 + static u32 cnak_pending; 310 + 311 + static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num) 312 + { 313 + if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) { 314 + DBG(ep->dev, "NAK could not be cleared for ep%d\n", num); 315 + cnak_pending |= 1 << (num); 316 + ep->naking = 1; 317 + } else 318 + cnak_pending = cnak_pending & (~(1 << (num))); 319 + } 320 + 321 + 322 + /* Enables endpoint, is called by gadget driver */ 323 + static int 324 + udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc) 325 + { 326 + struct udc_ep *ep; 327 + struct udc *dev; 328 + u32 tmp; 329 + unsigned long iflags; 330 + u8 udc_csr_epix; 331 + 332 + if (!usbep 333 + || usbep->name == ep0_string 334 + || !desc 335 + || desc->bDescriptorType != USB_DT_ENDPOINT) 336 + return -EINVAL; 337 + 338 + ep = container_of(usbep, struct udc_ep, ep); 339 + dev = ep->dev; 340 + 341 + DBG(dev, "udc_ep_enable() ep %d\n", ep->num); 342 + 343 + if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 344 + return -ESHUTDOWN; 345 + 346 + spin_lock_irqsave(&dev->lock, iflags); 347 + ep->desc = desc; 348 + 349 + ep->halted = 0; 350 + 351 + /* set traffic type */ 352 + tmp = readl(&dev->ep[ep->num].regs->ctl); 
353 + tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET); 354 + writel(tmp, &dev->ep[ep->num].regs->ctl); 355 + 356 + /* set max packet size */ 357 + tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt); 358 + tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_EP_MAX_PKT_SIZE); 359 + ep->ep.maxpacket = desc->wMaxPacketSize; 360 + writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt); 361 + 362 + /* IN ep */ 363 + if (ep->in) { 364 + 365 + /* ep ix in UDC CSR register space */ 366 + udc_csr_epix = ep->num; 367 + 368 + /* set buffer size (tx fifo entries) */ 369 + tmp = readl(&dev->ep[ep->num].regs->bufin_framenum); 370 + /* double buffering: fifo size = 2 x max packet size */ 371 + tmp = AMD_ADDBITS( 372 + tmp, 373 + desc->wMaxPacketSize * UDC_EPIN_BUFF_SIZE_MULT 374 + / UDC_DWORD_BYTES, 375 + UDC_EPIN_BUFF_SIZE); 376 + writel(tmp, &dev->ep[ep->num].regs->bufin_framenum); 377 + 378 + /* calc. tx fifo base addr */ 379 + udc_set_txfifo_addr(ep); 380 + 381 + /* flush fifo */ 382 + tmp = readl(&ep->regs->ctl); 383 + tmp |= AMD_BIT(UDC_EPCTL_F); 384 + writel(tmp, &ep->regs->ctl); 385 + 386 + /* OUT ep */ 387 + } else { 388 + /* ep ix in UDC CSR register space */ 389 + udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS; 390 + 391 + /* set max packet size UDC CSR */ 392 + tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]); 393 + tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, 394 + UDC_CSR_NE_MAX_PKT); 395 + writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]); 396 + 397 + if (use_dma && !ep->in) { 398 + /* alloc and init BNA dummy request */ 399 + ep->bna_dummy_req = udc_alloc_bna_dummy(ep); 400 + ep->bna_occurred = 0; 401 + } 402 + 403 + if (ep->num != UDC_EP0OUT_IX) 404 + dev->data_ep_enabled = 1; 405 + } 406 + 407 + /* set ep values */ 408 + tmp = readl(&dev->csr->ne[udc_csr_epix]); 409 + /* max packet */ 410 + tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_CSR_NE_MAX_PKT); 411 + /* ep number */ 412 + tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, 
UDC_CSR_NE_NUM); 413 + /* ep direction */ 414 + tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR); 415 + /* ep type */ 416 + tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE); 417 + /* ep config */ 418 + tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG); 419 + /* ep interface */ 420 + tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF); 421 + /* ep alt */ 422 + tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT); 423 + /* write reg */ 424 + writel(tmp, &dev->csr->ne[udc_csr_epix]); 425 + 426 + /* enable ep irq */ 427 + tmp = readl(&dev->regs->ep_irqmsk); 428 + tmp &= AMD_UNMASK_BIT(ep->num); 429 + writel(tmp, &dev->regs->ep_irqmsk); 430 + 431 + /* 432 + * clear NAK by writing CNAK 433 + * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written 434 + */ 435 + if (!use_dma || ep->in) { 436 + tmp = readl(&ep->regs->ctl); 437 + tmp |= AMD_BIT(UDC_EPCTL_CNAK); 438 + writel(tmp, &ep->regs->ctl); 439 + ep->naking = 0; 440 + UDC_QUEUE_CNAK(ep, ep->num); 441 + } 442 + tmp = desc->bEndpointAddress; 443 + DBG(dev, "%s enabled\n", usbep->name); 444 + 445 + spin_unlock_irqrestore(&dev->lock, iflags); 446 + return 0; 447 + } 448 + 449 + /* Resets endpoint */ 450 + static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep) 451 + { 452 + u32 tmp; 453 + 454 + VDBG(ep->dev, "ep-%d reset\n", ep->num); 455 + ep->desc = NULL; 456 + ep->ep.ops = &udc_ep_ops; 457 + INIT_LIST_HEAD(&ep->queue); 458 + 459 + ep->ep.maxpacket = (u16) ~0; 460 + /* set NAK */ 461 + tmp = readl(&ep->regs->ctl); 462 + tmp |= AMD_BIT(UDC_EPCTL_SNAK); 463 + writel(tmp, &ep->regs->ctl); 464 + ep->naking = 1; 465 + 466 + /* disable interrupt */ 467 + tmp = readl(&regs->ep_irqmsk); 468 + tmp |= AMD_BIT(ep->num); 469 + writel(tmp, &regs->ep_irqmsk); 470 + 471 + if (ep->in) { 472 + /* unset P and IN bit of potential former DMA */ 473 + tmp = readl(&ep->regs->ctl); 474 + tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P); 475 + writel(tmp, &ep->regs->ctl); 476 + 477 + tmp = 
readl(&ep->regs->sts); 478 + tmp |= AMD_BIT(UDC_EPSTS_IN); 479 + writel(tmp, &ep->regs->sts); 480 + 481 + /* flush the fifo */ 482 + tmp = readl(&ep->regs->ctl); 483 + tmp |= AMD_BIT(UDC_EPCTL_F); 484 + writel(tmp, &ep->regs->ctl); 485 + 486 + } 487 + /* reset desc pointer */ 488 + writel(0, &ep->regs->desptr); 489 + } 490 + 491 + /* Disables endpoint, is called by gadget driver */ 492 + static int udc_ep_disable(struct usb_ep *usbep) 493 + { 494 + struct udc_ep *ep = NULL; 495 + unsigned long iflags; 496 + 497 + if (!usbep) 498 + return -EINVAL; 499 + 500 + ep = container_of(usbep, struct udc_ep, ep); 501 + if (usbep->name == ep0_string || !ep->desc) 502 + return -EINVAL; 503 + 504 + DBG(ep->dev, "Disable ep-%d\n", ep->num); 505 + 506 + spin_lock_irqsave(&ep->dev->lock, iflags); 507 + udc_free_request(&ep->ep, &ep->bna_dummy_req->req); 508 + empty_req_queue(ep); 509 + ep_init(ep->dev->regs, ep); 510 + spin_unlock_irqrestore(&ep->dev->lock, iflags); 511 + 512 + return 0; 513 + } 514 + 515 + /* Allocates request packet, called by gadget driver */ 516 + static struct usb_request * 517 + udc_alloc_request(struct usb_ep *usbep, gfp_t gfp) 518 + { 519 + struct udc_request *req; 520 + struct udc_data_dma *dma_desc; 521 + struct udc_ep *ep; 522 + 523 + if (!usbep) 524 + return NULL; 525 + 526 + ep = container_of(usbep, struct udc_ep, ep); 527 + 528 + VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num); 529 + req = kzalloc(sizeof(struct udc_request), gfp); 530 + if (!req) 531 + return NULL; 532 + 533 + req->req.dma = DMA_DONT_USE; 534 + INIT_LIST_HEAD(&req->queue); 535 + 536 + if (ep->dma) { 537 + /* ep0 in requests are allocated from data pool here */ 538 + dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp, 539 + &req->td_phys); 540 + if (!dma_desc) { 541 + kfree(req); 542 + return NULL; 543 + } 544 + 545 + VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, " 546 + "td_phys = %lx\n", 547 + req, dma_desc, 548 + (unsigned long)req->td_phys); 549 + /* prevent from using 
desc. - set HOST BUSY */ 550 + dma_desc->status = AMD_ADDBITS(dma_desc->status, 551 + UDC_DMA_STP_STS_BS_HOST_BUSY, 552 + UDC_DMA_STP_STS_BS); 553 + dma_desc->bufptr = __constant_cpu_to_le32(DMA_DONT_USE); 554 + req->td_data = dma_desc; 555 + req->td_data_last = NULL; 556 + req->chain_len = 1; 557 + } 558 + 559 + return &req->req; 560 + } 561 + 562 + /* Frees request packet, called by gadget driver */ 563 + static void 564 + udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq) 565 + { 566 + struct udc_ep *ep; 567 + struct udc_request *req; 568 + 569 + if (!usbep || !usbreq) 570 + return; 571 + 572 + ep = container_of(usbep, struct udc_ep, ep); 573 + req = container_of(usbreq, struct udc_request, req); 574 + VDBG(ep->dev, "free_req req=%p\n", req); 575 + BUG_ON(!list_empty(&req->queue)); 576 + if (req->td_data) { 577 + VDBG(ep->dev, "req->td_data=%p\n", req->td_data); 578 + 579 + /* free dma chain if created */ 580 + if (req->chain_len > 1) { 581 + udc_free_dma_chain(ep->dev, req); 582 + } 583 + 584 + pci_pool_free(ep->dev->data_requests, req->td_data, 585 + req->td_phys); 586 + } 587 + kfree(req); 588 + } 589 + 590 + /* Init BNA dummy descriptor for HOST BUSY and pointing to itself */ 591 + static void udc_init_bna_dummy(struct udc_request *req) 592 + { 593 + if (req) { 594 + /* set last bit */ 595 + req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L); 596 + /* set next pointer to itself */ 597 + req->td_data->next = req->td_phys; 598 + /* set HOST BUSY */ 599 + req->td_data->status 600 + = AMD_ADDBITS(req->td_data->status, 601 + UDC_DMA_STP_STS_BS_DMA_DONE, 602 + UDC_DMA_STP_STS_BS); 603 + #ifdef UDC_VERBOSE 604 + pr_debug("bna desc = %p, sts = %08x\n", 605 + req->td_data, req->td_data->status); 606 + #endif 607 + } 608 + } 609 + 610 + /* Allocate BNA dummy descriptor */ 611 + static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep) 612 + { 613 + struct udc_request *req = NULL; 614 + struct usb_request *_req = NULL; 615 + 616 + /* alloc the 
dummy request */ 617 + _req = udc_alloc_request(&ep->ep, GFP_ATOMIC); 618 + if (_req) { 619 + req = container_of(_req, struct udc_request, req); 620 + ep->bna_dummy_req = req; 621 + udc_init_bna_dummy(req); 622 + } 623 + return req; 624 + } 625 + 626 + /* Write data to TX fifo for IN packets */ 627 + static void 628 + udc_txfifo_write(struct udc_ep *ep, struct usb_request *req) 629 + { 630 + u8 *req_buf; 631 + u32 *buf; 632 + int i, j; 633 + unsigned bytes = 0; 634 + unsigned remaining = 0; 635 + 636 + if (!req || !ep) 637 + return; 638 + 639 + req_buf = req->buf + req->actual; 640 + prefetch(req_buf); 641 + remaining = req->length - req->actual; 642 + 643 + buf = (u32 *) req_buf; 644 + 645 + bytes = ep->ep.maxpacket; 646 + if (bytes > remaining) 647 + bytes = remaining; 648 + 649 + /* dwords first */ 650 + for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) { 651 + writel(*(buf + i), ep->txfifo); 652 + } 653 + 654 + /* remaining bytes must be written by byte access */ 655 + for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) { 656 + writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)), 657 + ep->txfifo); 658 + } 659 + 660 + /* dummy write confirm */ 661 + writel(0, &ep->regs->confirm); 662 + } 663 + 664 + /* Read dwords from RX fifo for OUT transfers */ 665 + static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords) 666 + { 667 + int i; 668 + 669 + VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords); 670 + 671 + for (i = 0; i < dwords; i++) { 672 + *(buf + i) = readl(dev->rxfifo); 673 + } 674 + return 0; 675 + } 676 + 677 + /* Read bytes from RX fifo for OUT transfers */ 678 + static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes) 679 + { 680 + int i, j; 681 + u32 tmp; 682 + 683 + VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes); 684 + 685 + /* dwords first */ 686 + for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) { 687 + *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo); 688 + } 689 + 690 + /* remaining bytes must be read by byte access */ 691 + 
if (bytes % UDC_DWORD_BYTES) { 692 + tmp = readl(dev->rxfifo); 693 + for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) { 694 + *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK); 695 + tmp = tmp >> UDC_BITS_PER_BYTE; 696 + } 697 + } 698 + 699 + return 0; 700 + } 701 + 702 + /* Read data from RX fifo for OUT transfers */ 703 + static int 704 + udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req) 705 + { 706 + u8 *buf; 707 + unsigned buf_space; 708 + unsigned bytes = 0; 709 + unsigned finished = 0; 710 + 711 + /* received number bytes */ 712 + bytes = readl(&ep->regs->sts); 713 + bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE); 714 + 715 + buf_space = req->req.length - req->req.actual; 716 + buf = req->req.buf + req->req.actual; 717 + if (bytes > buf_space) { 718 + if ((buf_space % ep->ep.maxpacket) != 0) { 719 + DBG(ep->dev, 720 + "%s: rx %d bytes, rx-buf space = %d bytesn\n", 721 + ep->ep.name, bytes, buf_space); 722 + req->req.status = -EOVERFLOW; 723 + } 724 + bytes = buf_space; 725 + } 726 + req->req.actual += bytes; 727 + 728 + /* last packet ? 
*/ 729 + if (((bytes % ep->ep.maxpacket) != 0) || (!bytes) 730 + || ((req->req.actual == req->req.length) && !req->req.zero)) 731 + finished = 1; 732 + 733 + /* read rx fifo bytes */ 734 + VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes); 735 + udc_rxfifo_read_bytes(ep->dev, buf, bytes); 736 + 737 + return finished; 738 + } 739 + 740 + /* create/re-init a DMA descriptor or a DMA descriptor chain */ 741 + static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp) 742 + { 743 + int retval = 0; 744 + u32 tmp; 745 + 746 + VDBG(ep->dev, "prep_dma\n"); 747 + VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n", 748 + ep->num, req->td_data); 749 + 750 + /* set buffer pointer */ 751 + req->td_data->bufptr = req->req.dma; 752 + 753 + /* set last bit */ 754 + req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L); 755 + 756 + /* build/re-init dma chain if maxpkt scatter mode, not for EP0 */ 757 + if (use_dma_ppb) { 758 + 759 + retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp); 760 + if (retval != 0) { 761 + if (retval == -ENOMEM) 762 + DBG(ep->dev, "Out of DMA memory\n"); 763 + return retval; 764 + } 765 + if (ep->in) { 766 + if (req->req.length == ep->ep.maxpacket) { 767 + /* write tx bytes */ 768 + req->td_data->status = 769 + AMD_ADDBITS(req->td_data->status, 770 + ep->ep.maxpacket, 771 + UDC_DMA_IN_STS_TXBYTES); 772 + 773 + } 774 + } 775 + 776 + } 777 + 778 + if (ep->in) { 779 + VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d " 780 + "maxpacket=%d ep%d\n", 781 + use_dma_ppb, req->req.length, 782 + ep->ep.maxpacket, ep->num); 783 + /* 784 + * if bytes < max packet then tx bytes must 785 + * be written in packet per buffer mode 786 + */ 787 + if (!use_dma_ppb || req->req.length < ep->ep.maxpacket 788 + || ep->num == UDC_EP0OUT_IX 789 + || ep->num == UDC_EP0IN_IX) { 790 + /* write tx bytes */ 791 + req->td_data->status = 792 + AMD_ADDBITS(req->td_data->status, 793 + req->req.length, 794 + UDC_DMA_IN_STS_TXBYTES); 795 + /* reset frame 
num */ 796 + req->td_data->status = 797 + AMD_ADDBITS(req->td_data->status, 798 + 0, 799 + UDC_DMA_IN_STS_FRAMENUM); 800 + } 801 + /* set HOST BUSY */ 802 + req->td_data->status = 803 + AMD_ADDBITS(req->td_data->status, 804 + UDC_DMA_STP_STS_BS_HOST_BUSY, 805 + UDC_DMA_STP_STS_BS); 806 + } else { 807 + VDBG(ep->dev, "OUT set host ready\n"); 808 + /* set HOST READY */ 809 + req->td_data->status = 810 + AMD_ADDBITS(req->td_data->status, 811 + UDC_DMA_STP_STS_BS_HOST_READY, 812 + UDC_DMA_STP_STS_BS); 813 + 814 + 815 + /* clear NAK by writing CNAK */ 816 + if (ep->naking) { 817 + tmp = readl(&ep->regs->ctl); 818 + tmp |= AMD_BIT(UDC_EPCTL_CNAK); 819 + writel(tmp, &ep->regs->ctl); 820 + ep->naking = 0; 821 + UDC_QUEUE_CNAK(ep, ep->num); 822 + } 823 + 824 + } 825 + 826 + return retval; 827 + } 828 + 829 + /* Completes request packet ... caller MUST hold lock */ 830 + static void 831 + complete_req(struct udc_ep *ep, struct udc_request *req, int sts) 832 + __releases(ep->dev->lock) 833 + __acquires(ep->dev->lock) 834 + { 835 + struct udc *dev; 836 + unsigned halted; 837 + 838 + VDBG(ep->dev, "complete_req(): ep%d\n", ep->num); 839 + 840 + dev = ep->dev; 841 + /* unmap DMA */ 842 + if (req->dma_mapping) { 843 + if (ep->in) 844 + pci_unmap_single(dev->pdev, 845 + req->req.dma, 846 + req->req.length, 847 + PCI_DMA_TODEVICE); 848 + else 849 + pci_unmap_single(dev->pdev, 850 + req->req.dma, 851 + req->req.length, 852 + PCI_DMA_FROMDEVICE); 853 + req->dma_mapping = 0; 854 + req->req.dma = DMA_DONT_USE; 855 + } 856 + 857 + halted = ep->halted; 858 + ep->halted = 1; 859 + 860 + /* set new status if pending */ 861 + if (req->req.status == -EINPROGRESS) 862 + req->req.status = sts; 863 + 864 + /* remove from ep queue */ 865 + list_del_init(&req->queue); 866 + 867 + VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n", 868 + &req->req, req->req.length, ep->ep.name, sts); 869 + 870 + spin_unlock(&dev->lock); 871 + req->req.complete(&ep->ep, &req->req); 872 + 
spin_lock(&dev->lock); 873 + ep->halted = halted; 874 + } 875 + 876 + /* frees pci pool descriptors of a DMA chain */ 877 + static int udc_free_dma_chain(struct udc *dev, struct udc_request *req) 878 + { 879 + 880 + int ret_val = 0; 881 + struct udc_data_dma *td; 882 + struct udc_data_dma *td_last = NULL; 883 + unsigned int i; 884 + 885 + DBG(dev, "free chain req = %p\n", req); 886 + 887 + /* do not free first desc., will be done by free for request */ 888 + td_last = req->td_data; 889 + td = phys_to_virt(td_last->next); 890 + 891 + for (i = 1; i < req->chain_len; i++) { 892 + 893 + pci_pool_free(dev->data_requests, td, 894 + (dma_addr_t) td_last->next); 895 + td_last = td; 896 + td = phys_to_virt(td_last->next); 897 + } 898 + 899 + return ret_val; 900 + } 901 + 902 + /* Iterates to the end of a DMA chain and returns last descriptor */ 903 + static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req) 904 + { 905 + struct udc_data_dma *td; 906 + 907 + td = req->td_data; 908 + while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) { 909 + td = phys_to_virt(td->next); 910 + } 911 + 912 + return td; 913 + 914 + } 915 + 916 + /* Iterates to the end of a DMA chain and counts bytes received */ 917 + static u32 udc_get_ppbdu_rxbytes(struct udc_request *req) 918 + { 919 + struct udc_data_dma *td; 920 + u32 count; 921 + 922 + td = req->td_data; 923 + /* received number bytes */ 924 + count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES); 925 + 926 + while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) { 927 + td = phys_to_virt(td->next); 928 + /* received number bytes */ 929 + if (td) { 930 + count += AMD_GETBITS(td->status, 931 + UDC_DMA_OUT_STS_RXBYTES); 932 + } 933 + } 934 + 935 + return count; 936 + 937 + } 938 + 939 + /* Creates or re-inits a DMA chain */ 940 + static int udc_create_dma_chain( 941 + struct udc_ep *ep, 942 + struct udc_request *req, 943 + unsigned long buf_len, gfp_t gfp_flags 944 + ) 945 + { 946 + unsigned long bytes = 
req->req.length; 947 + unsigned int i; 948 + dma_addr_t dma_addr; 949 + struct udc_data_dma *td = NULL; 950 + struct udc_data_dma *last = NULL; 951 + unsigned long txbytes; 952 + unsigned create_new_chain = 0; 953 + unsigned len; 954 + 955 + VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n", 956 + bytes, buf_len); 957 + dma_addr = DMA_DONT_USE; 958 + 959 + /* unset L bit in first desc for OUT */ 960 + if (!ep->in) { 961 + req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L); 962 + } 963 + 964 + /* alloc only new desc's if not already available */ 965 + len = req->req.length / ep->ep.maxpacket; 966 + if (req->req.length % ep->ep.maxpacket) { 967 + len++; 968 + } 969 + 970 + if (len > req->chain_len) { 971 + /* shorter chain already allocated before */ 972 + if (req->chain_len > 1) { 973 + udc_free_dma_chain(ep->dev, req); 974 + } 975 + req->chain_len = len; 976 + create_new_chain = 1; 977 + } 978 + 979 + td = req->td_data; 980 + /* gen. required number of descriptors and buffers */ 981 + for (i = buf_len; i < bytes; i += buf_len) { 982 + /* create or determine next desc. */ 983 + if (create_new_chain) { 984 + 985 + td = pci_pool_alloc(ep->dev->data_requests, 986 + gfp_flags, &dma_addr); 987 + if (!td) 988 + return -ENOMEM; 989 + 990 + td->status = 0; 991 + } else if (i == buf_len) { 992 + /* first td */ 993 + td = (struct udc_data_dma *) phys_to_virt( 994 + req->td_data->next); 995 + td->status = 0; 996 + } else { 997 + td = (struct udc_data_dma *) phys_to_virt(last->next); 998 + td->status = 0; 999 + } 1000 + 1001 + 1002 + if (td) 1003 + td->bufptr = req->req.dma + i; /* assign buffer */ 1004 + else 1005 + break; 1006 + 1007 + /* short packet ? 
*/ 1008 + if ((bytes - i) >= buf_len) { 1009 + txbytes = buf_len; 1010 + } else { 1011 + /* short packet */ 1012 + txbytes = bytes - i; 1013 + } 1014 + 1015 + /* link td and assign tx bytes */ 1016 + if (i == buf_len) { 1017 + if (create_new_chain) { 1018 + req->td_data->next = dma_addr; 1019 + } else { 1020 + /* req->td_data->next = virt_to_phys(td); */ 1021 + } 1022 + /* write tx bytes */ 1023 + if (ep->in) { 1024 + /* first desc */ 1025 + req->td_data->status = 1026 + AMD_ADDBITS(req->td_data->status, 1027 + ep->ep.maxpacket, 1028 + UDC_DMA_IN_STS_TXBYTES); 1029 + /* second desc */ 1030 + td->status = AMD_ADDBITS(td->status, 1031 + txbytes, 1032 + UDC_DMA_IN_STS_TXBYTES); 1033 + } 1034 + } else { 1035 + if (create_new_chain) { 1036 + last->next = dma_addr; 1037 + } else { 1038 + /* last->next = virt_to_phys(td); */ 1039 + } 1040 + if (ep->in) { 1041 + /* write tx bytes */ 1042 + td->status = AMD_ADDBITS(td->status, 1043 + txbytes, 1044 + UDC_DMA_IN_STS_TXBYTES); 1045 + } 1046 + } 1047 + last = td; 1048 + } 1049 + /* set last bit */ 1050 + if (td) { 1051 + td->status |= AMD_BIT(UDC_DMA_IN_STS_L); 1052 + /* last desc. 
				   points to itself */
			req->td_data_last = td;
		}
	}

	return 0;
}

/*
 * Enable RX DMA: cancel any pending RDE timer (forcing it to expire
 * immediately with set_rde == 0 so the callback does nothing), then set
 * the RDE bit in the device control register so receive DMA starts.
 */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer by forcing immediate expiry */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

/*
 * Queues a request packet, called by gadget driver.
 * Maps the request buffer for DMA if not mapped yet, and on an empty
 * endpoint queue starts the transfer directly (ZLPs on IN are handled
 * by hardware and completed immediately).  Holds dev->lock for the
 * queue manipulation.  Returns 0 or a negative errno.
 */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int retval = 0;
	u8 open_rxfifo = 0;
	unsigned long iflags;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc *dev;
	u32 tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before by the gadget driver) */
	if (ep->dma && usbreq->length != 0
			&& (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
		VDBG(dev, "DMA map req %p\n", req);
		if (ep->in)
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_TODEVICE);
		else
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_FROMDEVICE);
		/* remember we own the mapping so completion unmaps it */
		req->dma_mapping = 1;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp (zero-length packet) */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disable rx dma while descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				/* reopen fifo after queueing (below) */
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		}

	} else if (ep->dma) {

		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in"
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;

			}
		}
	}


finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/*
 * Empty request queue of an endpoint; caller holds spinlock.
 * All outstanding requests are completed with -ESHUTDOWN and the
 * endpoint is marked halted.
 */
static void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request *req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}

/*
 * Dequeues a request packet, called by gadget driver.
 * If the request is currently at the head of the queue a running DMA
 * transfer must be cancelled (deferred to the ISR for IN, or for OUT
 * descriptors already handed to the hardware).  Completes the request
 * with -ECONNRESET.
 */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned halted;
	unsigned long iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
1315 + */ 1316 + dma_sts = AMD_GETBITS(req->td_data->status, 1317 + UDC_DMA_OUT_STS_BS); 1318 + if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY) 1319 + ep->cancel_transfer = 1; 1320 + else { 1321 + udc_init_bna_dummy(ep->req); 1322 + writel(ep->bna_dummy_req->td_phys, 1323 + &ep->regs->desptr); 1324 + } 1325 + writel(tmp, &udc->regs->ctl); 1326 + } 1327 + } 1328 + } 1329 + complete_req(ep, req, -ECONNRESET); 1330 + ep->halted = halted; 1331 + 1332 + spin_unlock_irqrestore(&ep->dev->lock, iflags); 1333 + return 0; 1334 + } 1335 + 1336 + /* Halt or clear halt of endpoint */ 1337 + static int 1338 + udc_set_halt(struct usb_ep *usbep, int halt) 1339 + { 1340 + struct udc_ep *ep; 1341 + u32 tmp; 1342 + unsigned long iflags; 1343 + int retval = 0; 1344 + 1345 + if (!usbep) 1346 + return -EINVAL; 1347 + 1348 + pr_debug("set_halt %s: halt=%d\n", usbep->name, halt); 1349 + 1350 + ep = container_of(usbep, struct udc_ep, ep); 1351 + if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX)) 1352 + return -EINVAL; 1353 + if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) 1354 + return -ESHUTDOWN; 1355 + 1356 + spin_lock_irqsave(&udc_stall_spinlock, iflags); 1357 + /* halt or clear halt */ 1358 + if (halt) { 1359 + if (ep->num == 0) 1360 + ep->dev->stall_ep0in = 1; 1361 + else { 1362 + /* 1363 + * set STALL 1364 + * rxfifo empty not taken into acount 1365 + */ 1366 + tmp = readl(&ep->regs->ctl); 1367 + tmp |= AMD_BIT(UDC_EPCTL_S); 1368 + writel(tmp, &ep->regs->ctl); 1369 + ep->halted = 1; 1370 + 1371 + /* setup poll timer */ 1372 + if (!timer_pending(&udc_pollstall_timer)) { 1373 + udc_pollstall_timer.expires = jiffies + 1374 + HZ * UDC_POLLSTALL_TIMER_USECONDS 1375 + / (1000 * 1000); 1376 + if (!stop_pollstall_timer) { 1377 + DBG(ep->dev, "start polltimer\n"); 1378 + add_timer(&udc_pollstall_timer); 1379 + } 1380 + } 1381 + } 1382 + } else { 1383 + /* ep is halted by set_halt() before */ 1384 + if (ep->halted) { 1385 + tmp = readl(&ep->regs->ctl); 1386 + /* 
clear stall bit */ 1387 + tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S); 1388 + /* clear NAK by writing CNAK */ 1389 + tmp |= AMD_BIT(UDC_EPCTL_CNAK); 1390 + writel(tmp, &ep->regs->ctl); 1391 + ep->halted = 0; 1392 + UDC_QUEUE_CNAK(ep, ep->num); 1393 + } 1394 + } 1395 + spin_unlock_irqrestore(&udc_stall_spinlock, iflags); 1396 + return retval; 1397 + } 1398 + 1399 + /* gadget interface */ 1400 + static const struct usb_ep_ops udc_ep_ops = { 1401 + .enable = udc_ep_enable, 1402 + .disable = udc_ep_disable, 1403 + 1404 + .alloc_request = udc_alloc_request, 1405 + .free_request = udc_free_request, 1406 + 1407 + .queue = udc_queue, 1408 + .dequeue = udc_dequeue, 1409 + 1410 + .set_halt = udc_set_halt, 1411 + /* fifo ops not implemented */ 1412 + }; 1413 + 1414 + /*-------------------------------------------------------------------------*/ 1415 + 1416 + /* Get frame counter (not implemented) */ 1417 + static int udc_get_frame(struct usb_gadget *gadget) 1418 + { 1419 + return -EOPNOTSUPP; 1420 + } 1421 + 1422 + /* Remote wakeup gadget interface */ 1423 + static int udc_wakeup(struct usb_gadget *gadget) 1424 + { 1425 + struct udc *dev; 1426 + 1427 + if (!gadget) 1428 + return -EINVAL; 1429 + dev = container_of(gadget, struct udc, gadget); 1430 + udc_remote_wakeup(dev); 1431 + 1432 + return 0; 1433 + } 1434 + 1435 + /* gadget operations */ 1436 + static const struct usb_gadget_ops udc_ops = { 1437 + .wakeup = udc_wakeup, 1438 + .get_frame = udc_get_frame, 1439 + }; 1440 + 1441 + /* Setups endpoint parameters, adds endpoints to linked list */ 1442 + static void make_ep_lists(struct udc *dev) 1443 + { 1444 + /* make gadget ep lists */ 1445 + INIT_LIST_HEAD(&dev->gadget.ep_list); 1446 + list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list, 1447 + &dev->gadget.ep_list); 1448 + list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list, 1449 + &dev->gadget.ep_list); 1450 + list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list, 1451 + &dev->gadget.ep_list); 1452 + 1453 + /* fifo config */ 1454 + 
dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE; 1455 + if (dev->gadget.speed == USB_SPEED_FULL) 1456 + dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE; 1457 + else if (dev->gadget.speed == USB_SPEED_HIGH) 1458 + dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf; 1459 + dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE; 1460 + } 1461 + 1462 + /* init registers at driver load time */ 1463 + static int startup_registers(struct udc *dev) 1464 + { 1465 + u32 tmp; 1466 + 1467 + /* init controller by soft reset */ 1468 + udc_soft_reset(dev); 1469 + 1470 + /* mask not needed interrupts */ 1471 + udc_mask_unused_interrupts(dev); 1472 + 1473 + /* put into initial config */ 1474 + udc_basic_init(dev); 1475 + /* link up all endpoints */ 1476 + udc_setup_endpoints(dev); 1477 + 1478 + /* program speed */ 1479 + tmp = readl(&dev->regs->cfg); 1480 + if (use_fullspeed) { 1481 + tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD); 1482 + } else { 1483 + tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD); 1484 + } 1485 + writel(tmp, &dev->regs->cfg); 1486 + 1487 + return 0; 1488 + } 1489 + 1490 + /* Inits UDC context */ 1491 + static void udc_basic_init(struct udc *dev) 1492 + { 1493 + u32 tmp; 1494 + 1495 + DBG(dev, "udc_basic_init()\n"); 1496 + 1497 + dev->gadget.speed = USB_SPEED_UNKNOWN; 1498 + 1499 + /* stop RDE timer */ 1500 + if (timer_pending(&udc_timer)) { 1501 + set_rde = 0; 1502 + mod_timer(&udc_timer, jiffies - 1); 1503 + } 1504 + /* stop poll stall timer */ 1505 + if (timer_pending(&udc_pollstall_timer)) { 1506 + mod_timer(&udc_pollstall_timer, jiffies - 1); 1507 + } 1508 + /* disable DMA */ 1509 + tmp = readl(&dev->regs->ctl); 1510 + tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE); 1511 + tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE); 1512 + writel(tmp, &dev->regs->ctl); 1513 + 1514 + /* enable dynamic CSR programming */ 1515 + tmp = readl(&dev->regs->cfg); 1516 + tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG); 1517 + /* set self powered */ 1518 + tmp |= 
AMD_BIT(UDC_DEVCFG_SP); 1519 + /* set remote wakeupable */ 1520 + tmp |= AMD_BIT(UDC_DEVCFG_RWKP); 1521 + writel(tmp, &dev->regs->cfg); 1522 + 1523 + make_ep_lists(dev); 1524 + 1525 + dev->data_ep_enabled = 0; 1526 + dev->data_ep_queued = 0; 1527 + } 1528 + 1529 + /* Sets initial endpoint parameters */ 1530 + static void udc_setup_endpoints(struct udc *dev) 1531 + { 1532 + struct udc_ep *ep; 1533 + u32 tmp; 1534 + u32 reg; 1535 + 1536 + DBG(dev, "udc_setup_endpoints()\n"); 1537 + 1538 + /* read enum speed */ 1539 + tmp = readl(&dev->regs->sts); 1540 + tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED); 1541 + if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) { 1542 + dev->gadget.speed = USB_SPEED_HIGH; 1543 + } else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) { 1544 + dev->gadget.speed = USB_SPEED_FULL; 1545 + } 1546 + 1547 + /* set basic ep parameters */ 1548 + for (tmp = 0; tmp < UDC_EP_NUM; tmp++) { 1549 + ep = &dev->ep[tmp]; 1550 + ep->dev = dev; 1551 + ep->ep.name = ep_string[tmp]; 1552 + ep->num = tmp; 1553 + /* txfifo size is calculated at enable time */ 1554 + ep->txfifo = dev->txfifo; 1555 + 1556 + /* fifo size */ 1557 + if (tmp < UDC_EPIN_NUM) { 1558 + ep->fifo_depth = UDC_TXFIFO_SIZE; 1559 + ep->in = 1; 1560 + } else { 1561 + ep->fifo_depth = UDC_RXFIFO_SIZE; 1562 + ep->in = 0; 1563 + 1564 + } 1565 + ep->regs = &dev->ep_regs[tmp]; 1566 + /* 1567 + * ep will be reset only if ep was not enabled before to avoid 1568 + * disabling ep interrupts when ENUM interrupt occurs but ep is 1569 + * not enabled by gadget driver 1570 + */ 1571 + if (!ep->desc) { 1572 + ep_init(dev->regs, ep); 1573 + } 1574 + 1575 + if (use_dma) { 1576 + /* 1577 + * ep->dma is not really used, just to indicate that 1578 + * DMA is active: remove this 1579 + * dma regs = dev control regs 1580 + */ 1581 + ep->dma = &dev->regs->ctl; 1582 + 1583 + /* nak OUT endpoints until enable - not for ep0 */ 1584 + if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX 1585 + && tmp > UDC_EPIN_NUM) { 1586 + /* set NAK */ 1587 + 
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;

			}
		}
	}
	/* EP0 max packet - speed dependent */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
						UDC_FS_EP0OUT_MAX_PKT_SIZE;
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}

/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}

/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events.  The actual work is
 * deferred to a tasklet so the gadget callback runs outside of the
 * interrupt context.
 */
static void usb_disconnect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */

	tasklet_schedule(&disconnect_tasklet);
}

/*
 * Tasklet for disconnect to be outside of interrupt context.
 * par is a pointer to the global udc pointer (double indirection).
 * Drops dev->lock around the gadget driver's disconnect() callback.
 */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
			empty_req_queue(&dev->ep[tmp]);
		}

	}

	/* disable ep0 */
	ep_init(dev->regs,
			&dev->ep[UDC_EP0IN_IX]);


	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}

/* Reset the UDC core */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long flags;

	DBG(dev, "Soft reset\n");
	/*
	 * reset possible waiting interrupts, because int.
	 * status is lost after soft reset,
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	spin_lock_irqsave(&udc_irq_spinlock, flags);
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	/* posted-write flush of the reset before releasing the lock */
	readl(&dev->regs->cfg);
	spin_unlock_irqrestore(&udc_irq_spinlock, flags);

}

/*
 * RDE timer callback to set RDE bit.
 * Driven by the global set_rde state machine:
 *   set_rde > 1 : fifo held data on the previous run - open the fifo now
 *   set_rde == 1, fifo empty : keep polling
 *   set_rde == 1, fifo has data : bump to 2 and re-arm with longer timeout
 *   set_rde <= 0 : RDE already handled by udc_queue(), mark idle (-1)
 */
static void udc_timer_function(unsigned long v)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * open the fifo if fifo was filled on last timer call
		 * conditionally
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer) {
				add_timer(&udc_timer);
			}
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets, when data packets get queued by
			 * gadget layer then timer will be forced to expire
			 * with set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer) {
				add_timer(&udc_timer);
			}
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);
	if (stop_timer)
		complete(&on_exit);

}

/* Handle halt state, used in stall poll timer */
static void udc_handle_halt_state(struct udc_ep *ep)
{
	u32 tmp;
	/* set stall as long as not halted */
	if (ep->halted == 1) {
		tmp = readl(&ep->regs->ctl);
		/* STALL cleared ? */
		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
			/*
			 * FIXME: MSC spec requires that stall remains
			 * even on receiving of CLEAR_FEATURE HALT. So
			 * we would set STALL again here to be compliant.
			 * But with current mass storage drivers this does
			 * not work (would produce endless host retries).
			 * So we clear halt on CLEAR_FEATURE.
			 *
			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);*/

			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}

/*
 * Stall timer callback to poll S bit and set it again after.
 * Only the single IN and OUT data endpoints are checked; the timer
 * re-arms itself while either endpoint is still halted.
 */
static void udc_pollstall_timer_function(unsigned long v)
{
	struct udc_ep *ep;
	int halted = 0;

	spin_lock_irq(&udc_stall_spinlock);
	/*
	 * only one IN and OUT endpoints are handled
	 * IN poll stall
	 */
	ep = &udc->ep[UDC_EPIN_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;
	/* OUT poll stall */
	ep = &udc->ep[UDC_EPOUT_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;

	/* setup timer again when still halted */
	if (!stop_pollstall_timer && halted) {
		udc_pollstall_timer.expires = jiffies +
			HZ * UDC_POLLSTALL_TIMER_USECONDS
			/ (1000 * 1000);
		add_timer(&udc_pollstall_timer);
	}
	spin_unlock_irq(&udc_stall_spinlock);

	if (stop_pollstall_timer)
		complete(&on_pollstall_exit);
}

/*
 * Inits endpoint 0 so that SETUP packets are processed: flushes the
 * ep0-in fifo, programs speed-dependent buffer and max packet sizes,
 * sets up the DMA descriptors (if DMA is used) and clears NAK on both
 * ep0 directions.
 */
static void activate_control_endpoints(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "activate_control_endpoints\n");

	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);

	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;

	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);

	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);

	if (use_dma) {
		/* mark ep0-out descriptor as last in chain */
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
		/* write dma desc address */
		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer */
		if (timer_pending(&udc_timer)) {
			set_rde = 0;
			mod_timer(&udc_timer, jiffies - 1);
		}
		/* stop pollstall timer */
		if (timer_pending(&udc_pollstall_timer)) {
			mod_timer(&udc_pollstall_timer, jiffies - 1);
		}
		/* enable DMA */
		tmp = readl(&dev->regs->ctl);
		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
				| AMD_BIT(UDC_DEVCTL_RDE)
				| AMD_BIT(UDC_DEVCTL_TDE);
		if (use_dma_bufferfill_mode) {
			tmp |= AMD_BIT(UDC_DEVCTL_BF);
		} else if (use_dma_ppb_du) {
			tmp |= AMD_BIT(UDC_DEVCTL_DU);
		}
		writel(tmp, &dev->regs->ctl);
	}

	/* clear NAK by writing CNAK for EP0IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	dev->ep[UDC_EP0IN_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

	/* clear NAK by writing CNAK for EP0OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
	dev->ep[UDC_EP0OUT_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}

/* Make endpoint 0 ready for control traffic; always returns 0 */
static int setup_ep0(struct udc *dev)
{
	activate_control_endpoints(dev);
	/* enable ep0 interrupts */
	udc_enable_ep0_interrupts(dev);
	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);

	return 0;
}

/*
 * Called by gadget driver to register itself.
 * Only high-speed capable drivers are accepted.  Binds the driver,
 * prepares ep0, clears the soft-disconnect (SD) bit and signals
 * connect.  Returns 0 or a negative errno.
 */
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	struct udc *dev = udc;
	int retval;
	u32 tmp;

	if (!driver || !driver->bind || !driver->setup
			|| driver->speed != USB_SPEED_HIGH)
		return -EINVAL;
	if (!dev)
		return -ENODEV;
	if (dev->driver)
		return -EBUSY;

	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;

	retval = driver->bind(&dev->gadget);

	/* Some gadget drivers use both ep0 directions.
	 * NOTE: to gadget driver, ep0 is just one endpoint...
	 */
	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
		dev->ep[UDC_EP0IN_IX].ep.driver_data;

	if (retval) {
		DBG(dev, "binding to %s returning %d\n",
				driver->driver.name, retval);
		dev->driver = NULL;
		dev->gadget.dev.driver = NULL;
		return retval;
	}

	/* get ready for ep0 traffic */
	setup_ep0(dev);

	/* clear SD (soft disconnect) */
	tmp = readl(&dev->regs->ctl);
	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	usb_connect(dev);

	return 0;
}
EXPORT_SYMBOL(usb_gadget_register_driver);

/*
 * shutdown requests and disconnect from gadget.
 * Caller holds dev->lock; the lock is dropped around the driver's
 * disconnect() callback (see __releases/__acquires annotations).
 */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
	int tmp;

	/* empty queues and init hardware */
	udc_basic_init(dev);
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		empty_req_queue(&dev->ep[tmp]);
	}

	if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}
	/* init */
	udc_setup_endpoints(dev);
}

/*
 * Called by gadget driver to unregister itself.
 * Shuts down all endpoints, unbinds the driver and sets the
 * soft-disconnect (SD) bit.  Returns 0 or a negative errno.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct udc *dev = udc;
	unsigned long flags;
	u32 tmp;

	if (!dev)
		return -ENODEV;
	if (!driver || driver != dev->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	udc_mask_unused_interrupts(dev);
	shutdown(dev, driver);
	spin_unlock_irqrestore(&dev->lock, flags);

	driver->unbind(&dev->gadget);
	dev->driver = NULL;

	/* set SD (soft disconnect) */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);


	DBG(dev, "%s: unregistered\n", driver->driver.name);

	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);


/*
 * Clear pending NAK bits.
 * Walks the global cnak_pending bitmask and writes CNAK for each
 * endpoint flagged there; called from the ISRs when the rx fifo is
 * empty.
 */
static void udc_process_cnak_queue(struct udc *dev)
{
	u32 tmp;
	u32 reg;

	/* check epin's */
	DBG(dev, "CNAK pending queue processing\n");
	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
		if (cnak_pending & (1 << tmp)) {
			DBG(dev, "CNAK pending for ep%d\n", tmp);
			/* clear NAK by writing CNAK */
			reg = readl(&dev->ep[tmp].regs->ctl);
			reg |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(reg, &dev->ep[tmp].regs->ctl);
			dev->ep[tmp].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
		}
	}
	/* ...
	   and ep0out */
	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
		/* clear NAK by writing CNAK */
		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
		reg |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
		dev->ep[UDC_EP0OUT_IX].naking = 0;
		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
				dev->ep[UDC_EP0OUT_IX].num);
	}
}

/*
 * Enabling RX DMA after setup packet.
 * RX DMA is opened immediately when no data endpoint is enabled or
 * data is already queued; otherwise the RDE timer is armed so the
 * fifo is not opened for data endpoints too early.
 */
static void udc_ep0_set_rde(struct udc *dev)
{
	if (use_dma) {
		/*
		 * only enable RXDMA when no data endpoint enabled
		 * or data is queued
		 */
		if (!dev->data_ep_enabled || dev->data_ep_queued) {
			udc_set_rde(dev);
		} else {
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data endpoints too early)
			 */
			if (set_rde != 0 && !timer_pending(&udc_timer)) {
				udc_timer.expires =
					jiffies + HZ/UDC_RDE_TIMER_DIV;
				set_rde = 1;
				if (!stop_timer) {
					add_timer(&udc_timer);
				}
			}
		}
	}
}


/*
 * Interrupt handler for data OUT traffic.
 * Handles BNA (buffer not available) and HE (host error) conditions,
 * reads received data either from the rx fifo (PIO mode) or from the
 * completed DMA descriptors, completes the current request and starts
 * DMA for the next queued request if any.  Returns IRQ_HANDLED when
 * the event was consumed.
 */
static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned int count;
	struct udc_data_dma *td = NULL;
	unsigned dma_done;

	VDBG(dev, "ep%d irq\n", ep_ix);
	ep = &dev->ep[ep_ix];

	tmp = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA event ? */
		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
			DBG(dev, "BNA ep%dout occured - DESPTR = %x \n",
					ep->num, readl(&ep->regs->desptr));
			/* clear BNA */
			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
			if (!ep->cancel_transfer)
				ep->bna_occurred = 1;
			else
				ep->cancel_transfer = 0;
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE (host error) event ? */
	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev, "HE ep%dout occured\n", ep->num);

		/* clear HE */
		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	if (!list_empty(&ep->queue)) {

		/* next request */
		req = list_entry(ep->queue.next,
			struct udc_request, queue);
	} else {
		/* no request queued yet - remember data is pending */
		req = NULL;
		udc_rxfifo_pending = 1;
	}
	VDBG(dev, "req = %p\n", req);
	/* fifo mode */
	if (!use_dma) {

		/* read fifo */
		if (req && udc_rxfifo_read(ep, req)) {
			ret_val = IRQ_HANDLED;

			/* finish */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request, queue);
			} else
				req = NULL;
		}

	/* DMA */
	} else if (!ep->cancel_transfer && req != NULL) {
		ret_val = IRQ_HANDLED;

		/* check for DMA done */
		if (!use_dma_ppb) {
			dma_done = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_BS);
		/* packet per buffer mode - rx bytes */
		} else {
			/*
			 * if BNA occurred then recover desc. from
			 * BNA dummy desc.
			 */
			if (ep->bna_occurred) {
				VDBG(dev, "Recover desc. from BNA dummy\n");
				memcpy(req->td_data, ep->bna_dummy_req->td_data,
						sizeof(struct udc_data_dma));
				ep->bna_occurred = 0;
				udc_init_bna_dummy(ep->req);
			}
			td = udc_get_last_dma_desc(req);
			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
		}
		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
			/* buffer fill mode - rx bytes */
			if (!use_dma_ppb) {
				/* received number bytes */
				count = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_RXBYTES);
				VDBG(dev, "rx bytes=%u\n", count);
			/* packet per buffer mode - rx bytes */
			} else {
				VDBG(dev, "req->td_data=%p\n", req->td_data);
				VDBG(dev, "last desc = %p\n", td);
				/* received number bytes */
				if (use_dma_ppb_du) {
					/* every desc. counts bytes */
					count = udc_get_ppbdu_rxbytes(req);
				} else {
					/* last desc. counts bytes */
					count = AMD_GETBITS(td->status,
						UDC_DMA_OUT_STS_RXBYTES);
					if (!count && req->req.length
						== UDC_DMA_MAXPACKET) {
						/*
						 * on 64k packets the RXBYTES
						 * field is zero
						 */
						count = UDC_DMA_MAXPACKET;
					}
				}
				VDBG(dev, "last desc rx bytes=%u\n", count);
			}

			/* clamp to remaining request space, flag overflow */
			tmp = req->req.length - req->req.actual;
			if (count > tmp) {
				if ((tmp % ep->ep.maxpacket) != 0) {
					DBG(dev, "%s: rx %db, space=%db\n",
						ep->ep.name, count, tmp);
					req->req.status = -EOVERFLOW;
				}
				count = tmp;
			}
			req->req.actual += count;
			req->dma_going = 0;
			/* complete request */
			complete_req(ep, req, 0);

			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request,
					queue);
				/*
				 * DMA may be already started by udc_queue()
				 * called by gadget drivers completion
				 * routine. This happens when queue
				 * holds one request only.
				 */
				if (req->dma_going == 0) {
					/* next dma */
					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
						goto finished;
					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);
					req->dma_going = 1;
					/* enable DMA */
					udc_set_rde(dev);
				}
			} else {
				/*
				 * implant BNA dummy descriptor to allow
				 * RXFIFO opening by RDE
				 */
				if (ep->bna_dummy_req) {
					/* write desc pointer */
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
					ep->bna_occurred = 0;
				}

				/*
				 * schedule timer for setting RDE if queue
				 * remains empty to allow ep0 packets pass
				 * through
				 */
				if (set_rde != 0
						&& !timer_pending(&udc_timer)) {
					udc_timer.expires =
						jiffies
						+ HZ*UDC_RDE_TIMER_SECONDS;
					set_rde = 1;
					if (!stop_timer) {
						add_timer(&udc_timer);
					}
				}
				if (ep->num != UDC_EP0OUT_IX)
					dev->data_ep_queued = 0;
			}

		} else {
			/*
			 * RX DMA must be reenabled for each desc in PPBDU mode
			 * and must be enabled for PPBNDU mode in case of BNA
			 */
			udc_set_rde(dev);
		}

	} else if (ep->cancel_transfer) {
		ret_val = IRQ_HANDLED;
		ep->cancel_transfer = 0;
	}

	/* check pending CNAKs */
	if (cnak_pending) {
		/* CNAK processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			udc_process_cnak_queue(dev);
		}
	}

	/* clear OUT bits in ep status */
	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
finished:
	return ret_val;
}

/* Interrupt handler for data IN traffic */
static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 epsts;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc_data_dma *td;
	unsigned dma_done;
	unsigned len;

	ep = &dev->ep[ep_ix];

	epsts = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA ? */
		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
			dev_err(&dev->pdev->dev,
				"BNA ep%din occured - DESPTR = %08lx \n",
				ep->num,
				(unsigned long) readl(&ep->regs->desptr));

			/* clear BNA */
			writel(epsts, &ep->regs->sts);
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev,
			"HE ep%dn occured - DESPTR = %08lx \n",
			ep->num, (unsigned long) readl(&ep->regs->desptr));

		/* clear HE */
		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* DMA completion */
	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "TDC set- completion\n");
		ret_val = IRQ_HANDLED;
		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			if (req) {
				/*
				 * length bytes transfered
				 * check dma done of last desc. in PPBDU mode
				 */
				if (use_dma_ppb_du) {
					td = udc_get_last_dma_desc(req);
					if (td) {
						dma_done =
							AMD_GETBITS(td->status,
							UDC_DMA_IN_STS_BS);
						/* don't care DMA done */
						req->req.actual =
							req->req.length;
					}
				} else {
					/* assume all bytes transferred */
					req->req.actual = req->req.length;
				}

				if (req->req.actual == req->req.length) {
					/* complete req */
					complete_req(ep, req, 0);
					req->dma_going = 0;
					/* further request available ? */
					if (list_empty(&ep->queue)) {
						/* disable interrupt */
						tmp = readl(
							&dev->regs->ep_irqmsk);
						tmp |= AMD_BIT(ep->num);
						writel(tmp,
							&dev->regs->ep_irqmsk);
					}

				}
			}
		}
		ep->cancel_transfer = 0;

	}
	/*
	 * status reg has IN bit set and TDC not set (if TDC was handled,
	 * IN must not be handled (UDC defect) ?
	 */
	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
		ret_val = IRQ_HANDLED;
		if (!list_empty(&ep->queue)) {
			/* next request */
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/* FIFO mode */
			if (!use_dma) {
				/* write fifo */
				udc_txfifo_write(ep, &req->req);
				len = req->req.length - req->req.actual;
				if (len > ep->ep.maxpacket)
					len = ep->ep.maxpacket;
				req->req.actual += len;
				if (req->req.actual == req->req.length
					|| (len != ep->ep.maxpacket)) {
					/* complete req */
					complete_req(ep, req, 0);
				}
			/* DMA */
			} else if (req && !req->dma_going) {
				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
					req, req->td_data);
				if (req->td_data) {

					req->dma_going = 1;

					/*
					 * unset L bit of first desc.
					 * for chain
					 */
					if (use_dma_ppb && req->req.length >
							ep->ep.maxpacket) {
						req->td_data->status &=
							AMD_CLEAR_BIT(
							UDC_DMA_IN_STS_L);
					}

					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);

					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);

					/* set poll demand bit */
					tmp = readl(&ep->regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp, &ep->regs->ctl);
				}
			}

		}
	}
	/* clear status bits */
	writel(epsts, &ep->regs->sts);

finished:
	return ret_val;

}

/*
 * Interrupt handler for Control OUT traffic (ep0out).
 * Called from udc_irq() with dev->lock held; temporarily drops the lock
 * around the gadget driver's setup() callback.
 */
static irqreturn_t udc_control_out_isr(struct udc *dev)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	int setup_supported;
	u32 count;
	int set = 0;
	struct udc_ep *ep;
	struct udc_ep *ep_tmp;

	ep = &dev->ep[UDC_EP0OUT_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
	/* check BNA and clear if set */
	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
		VDBG(dev, "ep0: BNA set\n");
		writel(AMD_BIT(UDC_EPSTS_BNA),
			&dev->ep[UDC_EP0OUT_IX].regs->sts);
		ep->bna_occurred = 1;
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* type of data: SETUP or DATA 0 bytes */
	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
	VDBG(dev, "data_typ = %x\n", tmp);

	/* setup data */
	if (tmp == UDC_EPSTS_OUT_SETUP) {
		ret_val = IRQ_HANDLED;

		ep->dev->stall_ep0in = 0;
		dev->waiting_zlp_ack_ep0in = 0;

		/* set NAK for EP0_IN
*/
		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		dev->ep[UDC_EP0IN_IX].naking = 1;
		/* get setup data */
		if (use_dma) {

			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);

			setup_data.data[0] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
			setup_data.data[1] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
			/* set HOST READY */
			dev->ep[UDC_EP0OUT_IX].td_stp->status =
				UDC_DMA_STP_STS_BS_HOST_READY;
		} else {
			/* read fifo */
			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
		}

		/* determine direction of control data */
		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
			/* enable RDE */
			udc_ep0_set_rde(dev);
			set = 0;
		} else {
			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
			/*
			 * implant BNA dummy descriptor to allow RXFIFO opening
			 * by RDE
			 */
			if (ep->bna_dummy_req) {
				/* write desc pointer */
				writel(ep->bna_dummy_req->td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				ep->bna_occurred = 0;
			}

			set = 1;
			dev->ep[UDC_EP0OUT_IX].naking = 1;
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data to early)
			 */
			set_rde = 1;
			if (!timer_pending(&udc_timer)) {
				udc_timer.expires = jiffies +
					HZ/UDC_RDE_TIMER_DIV;
				if (!stop_timer) {
					add_timer(&udc_timer);
				}
			}
		}

		/*
		 * mass storage reset must be processed here because
		 * next packet may be a CLEAR_FEATURE HALT which would not
		 * clear the stall bit when no STALL handshake was received
		 * before (autostall can cause this)
		 */
		if (setup_data.data[0] == UDC_MSCRES_DWORD0
				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
			DBG(dev, "MSC Reset\n");
			/*
			 * clear stall bits
			 * only one IN and OUT endpoints are handled
			 */
			ep_tmp = &udc->ep[UDC_EPIN_IX];
			udc_set_halt(&ep_tmp->ep, 0);
			ep_tmp = &udc->ep[UDC_EPOUT_IX];
			udc_set_halt(&ep_tmp->ep, 0);
		}

		/* call gadget with setup data received */
		spin_unlock(&dev->lock);
		setup_supported = dev->driver->setup(&dev->gadget,
						&setup_data.request);
		spin_lock(&dev->lock);

		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		/* ep0 in returns data (not zlp) on IN phase */
		if (setup_supported >= 0 && setup_supported <
				UDC_EP0IN_MAXPACKET) {
			/* clear NAK by writing CNAK in EP0_IN */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
			dev->ep[UDC_EP0IN_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

		/* if unsupported request then stall */
		} else if (setup_supported < 0) {
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		} else
			dev->waiting_zlp_ack_ep0in = 1;


		/* clear NAK by writing CNAK in EP0_OUT */
		if (!set) {
			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
			dev->ep[UDC_EP0OUT_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
		}

		if (!use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);
		}

	/* data packet 0 bytes */
	} else if (tmp == UDC_EPSTS_OUT_DATA) {
		/* clear OUT bits in ep status */
		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);

		/* get setup data: only 0 packet */
		if (use_dma) {
			/* no req if 0 packet, just reactivate */
			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
				VDBG(dev, "ZLP\n");

				/* set HOST READY */
				dev->ep[UDC_EP0OUT_IX].td->status =
					AMD_ADDBITS(
					dev->ep[UDC_EP0OUT_IX].td->status,
					UDC_DMA_OUT_STS_BS_HOST_READY,
					UDC_DMA_OUT_STS_BS);
				/* enable RDE */
				udc_ep0_set_rde(dev);
				ret_val = IRQ_HANDLED;

			} else {
				/* control write */
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
				/* re-program desc. pointer for possible ZLPs */
				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				/* enable RDE */
				udc_ep0_set_rde(dev);
			}
		} else {

			/* received number bytes */
			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
			/* out data for fifo mode not working */
			count = 0;

			/* 0 packet or real data ? */
			if (count != 0) {
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
			} else {
				/* dummy read confirm */
				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
				ret_val = IRQ_HANDLED;
			}
		}
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAk processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			udc_process_cnak_queue(dev);
		}
	}

finished:
	return ret_val;
}

/*
 * Interrupt handler for Control IN traffic (ep0in).
 * Called from udc_irq() with dev->lock held.
 */
static irqreturn_t udc_control_in_isr(struct udc *dev)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned len;

	ep = &dev->ep[UDC_EP0IN_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
	/* DMA completion */
	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "isr: TDC clear \n");
		ret_val = IRQ_HANDLED;

		/* clear TDC bit */
		writel(AMD_BIT(UDC_EPSTS_TDC),
			&dev->ep[UDC_EP0IN_IX].regs->sts);

	/* status reg has IN bit set ?
*/ 2733 + } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) { 2734 + ret_val = IRQ_HANDLED; 2735 + 2736 + if (ep->dma) { 2737 + /* clear IN bit */ 2738 + writel(AMD_BIT(UDC_EPSTS_IN), 2739 + &dev->ep[UDC_EP0IN_IX].regs->sts); 2740 + } 2741 + if (dev->stall_ep0in) { 2742 + DBG(dev, "stall ep0in\n"); 2743 + /* halt ep0in */ 2744 + tmp = readl(&ep->regs->ctl); 2745 + tmp |= AMD_BIT(UDC_EPCTL_S); 2746 + writel(tmp, &ep->regs->ctl); 2747 + } else { 2748 + if (!list_empty(&ep->queue)) { 2749 + /* next request */ 2750 + req = list_entry(ep->queue.next, 2751 + struct udc_request, queue); 2752 + 2753 + if (ep->dma) { 2754 + /* write desc pointer */ 2755 + writel(req->td_phys, &ep->regs->desptr); 2756 + /* set HOST READY */ 2757 + req->td_data->status = 2758 + AMD_ADDBITS( 2759 + req->td_data->status, 2760 + UDC_DMA_STP_STS_BS_HOST_READY, 2761 + UDC_DMA_STP_STS_BS); 2762 + 2763 + /* set poll demand bit */ 2764 + tmp = 2765 + readl(&dev->ep[UDC_EP0IN_IX].regs->ctl); 2766 + tmp |= AMD_BIT(UDC_EPCTL_P); 2767 + writel(tmp, 2768 + &dev->ep[UDC_EP0IN_IX].regs->ctl); 2769 + 2770 + /* all bytes will be transferred */ 2771 + req->req.actual = req->req.length; 2772 + 2773 + /* complete req */ 2774 + complete_req(ep, req, 0); 2775 + 2776 + } else { 2777 + /* write fifo */ 2778 + udc_txfifo_write(ep, &req->req); 2779 + 2780 + /* lengh bytes transfered */ 2781 + len = req->req.length - req->req.actual; 2782 + if (len > ep->ep.maxpacket) 2783 + len = ep->ep.maxpacket; 2784 + 2785 + req->req.actual += len; 2786 + if (req->req.actual == req->req.length 2787 + || (len != ep->ep.maxpacket)) { 2788 + /* complete req */ 2789 + complete_req(ep, req, 0); 2790 + } 2791 + } 2792 + 2793 + } 2794 + } 2795 + ep->halted = 0; 2796 + dev->stall_ep0in = 0; 2797 + if (!ep->dma) { 2798 + /* clear IN bit */ 2799 + writel(AMD_BIT(UDC_EPSTS_IN), 2800 + &dev->ep[UDC_EP0IN_IX].regs->sts); 2801 + } 2802 + } 2803 + 2804 + return ret_val; 2805 + } 2806 + 2807 + 2808 + /* Interrupt handler for global device events */ 2809 + 
/*
 * Handles device-global interrupts: SET_CONFIG, SET_INTERFACE, USB reset,
 * suspend, enumeration done (speed) and session-valid change.
 * Called from udc_irq() with dev->lock held; drops the lock around
 * gadget driver callbacks (setup/resume/suspend/disconnect).
 */
static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 cfg;
	struct udc_ep *ep;
	u16 i;
	u8 udc_csr_epix;

	/* SET_CONFIG irq ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
		ret_val = IRQ_HANDLED;

		/* read config value */
		tmp = readl(&dev->regs->sts);
		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
		dev->cur_config = cfg;
		dev->set_cfg_not_acked = 1;

		/* make usb request for gadget driver */
		memset(&setup_data, 0 , sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
		setup_data.request.wValue = dev->cur_config;

		/* programm the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {

				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;


			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep cfg */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
						UDC_CSR_NE_CFG);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}
		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* SET_INTERFACE ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
		ret_val = IRQ_HANDLED;

		dev->set_cfg_not_acked = 1;
		/* read interface and alt setting values */
		tmp = readl(&dev->regs->sts);
		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);

		/* make usb request for gadget driver */
		memset(&setup_data, 0 , sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
		setup_data.request.wValue = dev->cur_alt;
		setup_data.request.wIndex = dev->cur_intf;

		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
				dev->cur_alt, dev->cur_intf);

		/* programm the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {

				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;


			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			/* UDC CSR reg */
			/* set ep values */
			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep interface */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
						UDC_CSR_NE_INTF);
			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
			/* ep alt */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
						UDC_CSR_NE_ALT);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}

		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* USB reset */
	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
		DBG(dev, "USB Reset interrupt\n");
		ret_val = IRQ_HANDLED;

		/* allow soft reset when suspend occurs */
		soft_reset_occured = 0;

		dev->waiting_zlp_ack_ep0in = 0;
		dev->set_cfg_not_acked = 0;

		/* mask not needed interrupts */
		udc_mask_unused_interrupts(dev);

		/* call gadget to resume and reset configs etc. */
		spin_unlock(&dev->lock);
		if (dev->sys_suspended && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
			dev->sys_suspended = 0;
		}
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* soft reset when rxfifo not empty */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
				&& !soft_reset_after_usbreset_occured) {
			udc_soft_reset(dev);
			soft_reset_after_usbreset_occured++;
		}

		/*
		 * DMA reset to kill potential old DMA hw hang,
		 * POLL bit is already reset by ep_init() through
		 * disconnect()
		 */
		DBG(dev, "DMA machine reset\n");
		tmp = readl(&dev->regs->cfg);
		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
		writel(tmp, &dev->regs->cfg);

		/* put into initial config */
		udc_basic_init(dev);

		/* enable device setup interrupts */
		udc_enable_dev_setup_interrupts(dev);

		/* enable suspend interrupt */
		tmp = readl(&dev->regs->irqmsk);
		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
		writel(tmp, &dev->regs->irqmsk);

	} /* USB suspend */
	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
		DBG(dev, "USB Suspend interrupt\n");
		ret_val = IRQ_HANDLED;
		if (dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->sys_suspended = 1;
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}
	} /* new speed ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
		DBG(dev, "ENUM interrupt\n");
		ret_val = IRQ_HANDLED;
		soft_reset_after_usbreset_occured = 0;

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* link up all endpoints */
		udc_setup_endpoints(dev);
		if (dev->gadget.speed == USB_SPEED_HIGH) {
			dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
				"high");
		} else if (dev->gadget.speed == USB_SPEED_FULL) {
			dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
				"full");
		}

		/* init ep 0 */
		activate_control_endpoints(dev);

		/* enable ep0 interrupts */
		udc_enable_ep0_interrupts(dev);
	}
	/* session valid change interrupt */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
		DBG(dev, "USB SVC interrupt\n");
		ret_val = IRQ_HANDLED;

		/* check that session is not valid to detect disconnect */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
			/* disable suspend interrupt */
			tmp = readl(&dev->regs->irqmsk);
			tmp |= AMD_BIT(UDC_DEVINT_US);
			writel(tmp, &dev->regs->irqmsk);
			DBG(dev, "USB Disconnect (session valid low)\n");
			/* cleanup on disconnect */
			usb_disconnect(udc);
		}

	}

	return ret_val;
}

/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
static irqreturn_t udc_irq(int irq, void *pdev)
{
	struct udc *dev = pdev;
	u32 reg;
	u16 i;
	u32 ep_irq;
	irqreturn_t ret_val = IRQ_NONE;

	spin_lock(&dev->lock);

	/* check for ep irq */
	reg = readl(&dev->regs->ep_irqsts);
3056 + if (reg) { 3057 + if (reg & AMD_BIT(UDC_EPINT_OUT_EP0)) 3058 + ret_val |= udc_control_out_isr(dev); 3059 + if (reg & AMD_BIT(UDC_EPINT_IN_EP0)) 3060 + ret_val |= udc_control_in_isr(dev); 3061 + 3062 + /* 3063 + * data endpoint 3064 + * iterate ep's 3065 + */ 3066 + for (i = 1; i < UDC_EP_NUM; i++) { 3067 + ep_irq = 1 << i; 3068 + if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0) 3069 + continue; 3070 + 3071 + /* clear irq status */ 3072 + writel(ep_irq, &dev->regs->ep_irqsts); 3073 + 3074 + /* irq for out ep ? */ 3075 + if (i > UDC_EPIN_NUM) 3076 + ret_val |= udc_data_out_isr(dev, i); 3077 + else 3078 + ret_val |= udc_data_in_isr(dev, i); 3079 + } 3080 + 3081 + } 3082 + 3083 + 3084 + /* check for dev irq */ 3085 + reg = readl(&dev->regs->irqsts); 3086 + if (reg) { 3087 + /* clear irq */ 3088 + writel(reg, &dev->regs->irqsts); 3089 + ret_val |= udc_dev_isr(dev, reg); 3090 + } 3091 + 3092 + 3093 + spin_unlock(&dev->lock); 3094 + return ret_val; 3095 + } 3096 + 3097 + /* Tears down device */ 3098 + static void gadget_release(struct device *pdev) 3099 + { 3100 + struct amd5536udc *dev = dev_get_drvdata(pdev); 3101 + kfree(dev); 3102 + } 3103 + 3104 + /* Cleanup on device remove */ 3105 + static void udc_remove(struct udc *dev) 3106 + { 3107 + /* remove timer */ 3108 + stop_timer++; 3109 + if (timer_pending(&udc_timer)) 3110 + wait_for_completion(&on_exit); 3111 + if (udc_timer.data) 3112 + del_timer_sync(&udc_timer); 3113 + /* remove pollstall timer */ 3114 + stop_pollstall_timer++; 3115 + if (timer_pending(&udc_pollstall_timer)) 3116 + wait_for_completion(&on_pollstall_exit); 3117 + if (udc_pollstall_timer.data) 3118 + del_timer_sync(&udc_pollstall_timer); 3119 + udc = NULL; 3120 + } 3121 + 3122 + /* Reset all pci context */ 3123 + static void udc_pci_remove(struct pci_dev *pdev) 3124 + { 3125 + struct udc *dev; 3126 + 3127 + dev = pci_get_drvdata(pdev); 3128 + 3129 + /* gadget driver must not be registered */ 3130 + BUG_ON(dev->driver != NULL); 3131 + 3132 + /* 
dma pool cleanup */
	if (dev->data_requests)
		pci_pool_destroy(dev->data_requests);

	if (dev->stp_requests) {
		/* cleanup DMA desc's for ep0in */
		pci_pool_free(dev->stp_requests,
			dev->ep[UDC_EP0OUT_IX].td_stp,
			dev->ep[UDC_EP0OUT_IX].td_stp_dma);
		pci_pool_free(dev->stp_requests,
			dev->ep[UDC_EP0OUT_IX].td,
			dev->ep[UDC_EP0OUT_IX].td_phys);

		pci_pool_destroy(dev->stp_requests);
	}

	/* reset controller */
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	if (dev->irq_registered)
		free_irq(pdev->irq, dev);
	if (dev->regs)
		iounmap(dev->regs);
	if (dev->mem_region)
		release_mem_region(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));
	if (dev->active)
		pci_disable_device(pdev);

	/* dev itself is freed by gadget_release() via device_unregister() */
	device_unregister(&dev->gadget.dev);
	pci_set_drvdata(pdev, NULL);

	udc_remove(dev);
}

/*
 * create dma pools on init
 * NOTE(review): dma_pool_create() is called with a NULL device here —
 * presumably relying on coherent allocations without a struct device;
 * confirm this is intended rather than passing &dev->pdev->dev.
 */
static int init_dma_pools(struct udc *dev)
{
	struct udc_stp_dma *td_stp;
	struct udc_data_dma *td_data;
	int retval;

	/* consistent DMA mode setting ? */
	if (use_dma_ppb) {
		use_dma_bufferfill_mode = 0;
	} else {
		use_dma_ppb_du = 0;
		use_dma_bufferfill_mode = 1;
	}

	/* DMA setup */
	dev->data_requests = dma_pool_create("data_requests", NULL,
		sizeof(struct udc_data_dma), 0, 0);
	if (!dev->data_requests) {
		DBG(dev, "can't get request data pool\n");
		retval = -ENOMEM;
		goto finished;
	}

	/* EP0 in dma regs = dev control regs */
	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;

	/* dma desc for setup data */
	dev->stp_requests = dma_pool_create("setup requests", NULL,
		sizeof(struct udc_stp_dma), 0, 0);
	if (!dev->stp_requests) {
		DBG(dev, "can't get stp request pool\n");
		retval = -ENOMEM;
		goto finished;
	}
	/* setup */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
		&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	if (td_stp == NULL) {
		retval = -ENOMEM;
		goto finished;
	}
	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;

	/* data: 0 packets !?
*/ 3211 + td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL, 3212 + &dev->ep[UDC_EP0OUT_IX].td_phys); 3213 + if (td_data == NULL) { 3214 + retval = -ENOMEM; 3215 + goto finished; 3216 + } 3217 + dev->ep[UDC_EP0OUT_IX].td = td_data; 3218 + return 0; 3219 + 3220 + finished: 3221 + return retval; 3222 + } 3223 + 3224 + /* Called by pci bus driver to init pci context */ 3225 + static int udc_pci_probe( 3226 + struct pci_dev *pdev, 3227 + const struct pci_device_id *id 3228 + ) 3229 + { 3230 + struct udc *dev; 3231 + unsigned long resource; 3232 + unsigned long len; 3233 + int retval = 0; 3234 + 3235 + /* one udc only */ 3236 + if (udc) { 3237 + dev_dbg(&pdev->dev, "already probed\n"); 3238 + return -EBUSY; 3239 + } 3240 + 3241 + /* init */ 3242 + dev = kzalloc(sizeof(struct udc), GFP_KERNEL); 3243 + if (!dev) { 3244 + retval = -ENOMEM; 3245 + goto finished; 3246 + } 3247 + memset(dev, 0, sizeof(struct udc)); 3248 + 3249 + /* pci setup */ 3250 + if (pci_enable_device(pdev) < 0) { 3251 + retval = -ENODEV; 3252 + goto finished; 3253 + } 3254 + dev->active = 1; 3255 + 3256 + /* PCI resource allocation */ 3257 + resource = pci_resource_start(pdev, 0); 3258 + len = pci_resource_len(pdev, 0); 3259 + 3260 + if (!request_mem_region(resource, len, name)) { 3261 + dev_dbg(&pdev->dev, "pci device used already\n"); 3262 + retval = -EBUSY; 3263 + goto finished; 3264 + } 3265 + dev->mem_region = 1; 3266 + 3267 + dev->virt_addr = ioremap_nocache(resource, len); 3268 + if (dev->virt_addr == NULL) { 3269 + dev_dbg(&pdev->dev, "start address cannot be mapped\n"); 3270 + retval = -EFAULT; 3271 + goto finished; 3272 + } 3273 + 3274 + if (!pdev->irq) { 3275 + dev_err(&dev->pdev->dev, "irq not set\n"); 3276 + retval = -ENODEV; 3277 + goto finished; 3278 + } 3279 + 3280 + if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) { 3281 + dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq); 3282 + retval = -EBUSY; 3283 + goto finished; 3284 + } 3285 + dev->irq_registered 
= 1; 3286 + 3287 + pci_set_drvdata(pdev, dev); 3288 + 3289 + /* chip revision */ 3290 + dev->chiprev = 0; 3291 + 3292 + pci_set_master(pdev); 3293 + pci_set_mwi(pdev); 3294 + 3295 + /* chip rev for Hs AMD5536 */ 3296 + pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) &dev->chiprev); 3297 + /* init dma pools */ 3298 + if (use_dma) { 3299 + retval = init_dma_pools(dev); 3300 + if (retval != 0) 3301 + goto finished; 3302 + } 3303 + 3304 + dev->phys_addr = resource; 3305 + dev->irq = pdev->irq; 3306 + dev->pdev = pdev; 3307 + dev->gadget.dev.parent = &pdev->dev; 3308 + dev->gadget.dev.dma_mask = pdev->dev.dma_mask; 3309 + 3310 + /* general probing */ 3311 + if (udc_probe(dev) == 0) 3312 + return 0; 3313 + 3314 + finished: 3315 + if (dev) 3316 + udc_pci_remove(pdev); 3317 + return retval; 3318 + } 3319 + 3320 + /* general probe */ 3321 + static int udc_probe(struct udc *dev) 3322 + { 3323 + char tmp[128]; 3324 + u32 reg; 3325 + int retval; 3326 + 3327 + /* mark timer as not initialized */ 3328 + udc_timer.data = 0; 3329 + udc_pollstall_timer.data = 0; 3330 + 3331 + /* device struct setup */ 3332 + spin_lock_init(&dev->lock); 3333 + dev->gadget.ops = &udc_ops; 3334 + 3335 + strcpy(dev->gadget.dev.bus_id, "gadget"); 3336 + dev->gadget.dev.release = gadget_release; 3337 + dev->gadget.name = name; 3338 + dev->gadget.name = name; 3339 + dev->gadget.is_dualspeed = 1; 3340 + 3341 + /* udc csr registers base */ 3342 + dev->csr = dev->virt_addr + UDC_CSR_ADDR; 3343 + /* dev registers base */ 3344 + dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR; 3345 + /* ep registers base */ 3346 + dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR; 3347 + /* fifo's base */ 3348 + dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR); 3349 + dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR); 3350 + 3351 + /* init registers, interrupts, ... 
*/ 3352 + startup_registers(dev); 3353 + 3354 + dev_info(&dev->pdev->dev, "%s\n", mod_desc); 3355 + 3356 + snprintf(tmp, sizeof tmp, "%d", dev->irq); 3357 + dev_info(&dev->pdev->dev, 3358 + "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n", 3359 + tmp, dev->phys_addr, dev->chiprev, 3360 + (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1"); 3361 + strcpy(tmp, UDC_DRIVER_VERSION_STRING); 3362 + if (dev->chiprev == UDC_HSA0_REV) { 3363 + dev_err(&dev->pdev->dev, "chip revision is A0; too old\n"); 3364 + retval = -ENODEV; 3365 + goto finished; 3366 + } 3367 + dev_info(&dev->pdev->dev, 3368 + "driver version: %s(for Geode5536 B1)\n", tmp); 3369 + udc = dev; 3370 + 3371 + retval = device_register(&dev->gadget.dev); 3372 + if (retval) 3373 + goto finished; 3374 + 3375 + /* timer init */ 3376 + init_timer(&udc_timer); 3377 + udc_timer.function = udc_timer_function; 3378 + udc_timer.data = 1; 3379 + /* timer pollstall init */ 3380 + init_timer(&udc_pollstall_timer); 3381 + udc_pollstall_timer.function = udc_pollstall_timer_function; 3382 + udc_pollstall_timer.data = 1; 3383 + 3384 + /* set SD */ 3385 + reg = readl(&dev->regs->ctl); 3386 + reg |= AMD_BIT(UDC_DEVCTL_SD); 3387 + writel(reg, &dev->regs->ctl); 3388 + 3389 + /* print dev register info */ 3390 + print_regs(dev); 3391 + 3392 + return 0; 3393 + 3394 + finished: 3395 + return retval; 3396 + } 3397 + 3398 + /* Initiates a remote wakeup */ 3399 + static int udc_remote_wakeup(struct udc *dev) 3400 + { 3401 + unsigned long flags; 3402 + u32 tmp; 3403 + 3404 + DBG(dev, "UDC initiates remote wakeup\n"); 3405 + 3406 + spin_lock_irqsave(&dev->lock, flags); 3407 + 3408 + tmp = readl(&dev->regs->ctl); 3409 + tmp |= AMD_BIT(UDC_DEVCTL_RES); 3410 + writel(tmp, &dev->regs->ctl); 3411 + tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES); 3412 + writel(tmp, &dev->regs->ctl); 3413 + 3414 + spin_unlock_irqrestore(&dev->lock, flags); 3415 + return 0; 3416 + } 3417 + 3418 + /* PCI device parameters */ 3419 + static const struct pci_device_id 
pci_id[] = { 3420 + { 3421 + PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096), 3422 + .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, 3423 + .class_mask = 0xffffffff, 3424 + }, 3425 + {}, 3426 + }; 3427 + MODULE_DEVICE_TABLE(pci, pci_id); 3428 + 3429 + /* PCI functions */ 3430 + static struct pci_driver udc_pci_driver = { 3431 + .name = (char *) name, 3432 + .id_table = pci_id, 3433 + .probe = udc_pci_probe, 3434 + .remove = udc_pci_remove, 3435 + }; 3436 + 3437 + /* Inits driver */ 3438 + static int __init init(void) 3439 + { 3440 + return pci_register_driver(&udc_pci_driver); 3441 + } 3442 + module_init(init); 3443 + 3444 + /* Cleans driver */ 3445 + static void __exit cleanup(void) 3446 + { 3447 + pci_unregister_driver(&udc_pci_driver); 3448 + } 3449 + module_exit(cleanup); 3450 + 3451 + MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION); 3452 + MODULE_AUTHOR("Thomas Dahlmann"); 3453 + MODULE_LICENSE("GPL"); 3454 +
+626
drivers/usb/gadget/amd5536udc.h
··· 1 + /* 2 + * amd5536.h -- header for AMD 5536 UDC high/full speed USB device controller 3 + * 4 + * Copyright (C) 2007 AMD (http://www.amd.com) 5 + * Author: Thomas Dahlmann 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program; if not, write to the Free Software 19 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 + */ 21 + 22 + #ifndef AMD5536UDC_H 23 + #define AMD5536UDC_H 24 + 25 + /* various constants */ 26 + #define UDC_RDE_TIMER_SECONDS 1 27 + #define UDC_RDE_TIMER_DIV 10 28 + #define UDC_POLLSTALL_TIMER_USECONDS 500 29 + 30 + /* Hs AMD5536 chip rev. 
*/ 31 + #define UDC_HSA0_REV 1 32 + #define UDC_HSB1_REV 2 33 + 34 + /* 35 + * SETUP usb commands 36 + * needed, because some SETUP's are handled in hw, but must be passed to 37 + * gadget driver above 38 + * SET_CONFIG 39 + */ 40 + #define UDC_SETCONFIG_DWORD0 0x00000900 41 + #define UDC_SETCONFIG_DWORD0_VALUE_MASK 0xffff0000 42 + #define UDC_SETCONFIG_DWORD0_VALUE_OFS 16 43 + 44 + #define UDC_SETCONFIG_DWORD1 0x00000000 45 + 46 + /* SET_INTERFACE */ 47 + #define UDC_SETINTF_DWORD0 0x00000b00 48 + #define UDC_SETINTF_DWORD0_ALT_MASK 0xffff0000 49 + #define UDC_SETINTF_DWORD0_ALT_OFS 16 50 + 51 + #define UDC_SETINTF_DWORD1 0x00000000 52 + #define UDC_SETINTF_DWORD1_INTF_MASK 0x0000ffff 53 + #define UDC_SETINTF_DWORD1_INTF_OFS 0 54 + 55 + /* Mass storage reset */ 56 + #define UDC_MSCRES_DWORD0 0x0000ff21 57 + #define UDC_MSCRES_DWORD1 0x00000000 58 + 59 + /* Global CSR's -------------------------------------------------------------*/ 60 + #define UDC_CSR_ADDR 0x500 61 + 62 + /* EP NE bits */ 63 + /* EP number */ 64 + #define UDC_CSR_NE_NUM_MASK 0x0000000f 65 + #define UDC_CSR_NE_NUM_OFS 0 66 + /* EP direction */ 67 + #define UDC_CSR_NE_DIR_MASK 0x00000010 68 + #define UDC_CSR_NE_DIR_OFS 4 69 + /* EP type */ 70 + #define UDC_CSR_NE_TYPE_MASK 0x00000060 71 + #define UDC_CSR_NE_TYPE_OFS 5 72 + /* EP config number */ 73 + #define UDC_CSR_NE_CFG_MASK 0x00000780 74 + #define UDC_CSR_NE_CFG_OFS 7 75 + /* EP interface number */ 76 + #define UDC_CSR_NE_INTF_MASK 0x00007800 77 + #define UDC_CSR_NE_INTF_OFS 11 78 + /* EP alt setting */ 79 + #define UDC_CSR_NE_ALT_MASK 0x00078000 80 + #define UDC_CSR_NE_ALT_OFS 15 81 + 82 + /* max pkt */ 83 + #define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000 84 + #define UDC_CSR_NE_MAX_PKT_OFS 19 85 + 86 + /* Device Config Register ---------------------------------------------------*/ 87 + #define UDC_DEVCFG_ADDR 0x400 88 + 89 + #define UDC_DEVCFG_SOFTRESET 31 90 + #define UDC_DEVCFG_HNPSFEN 30 91 + #define UDC_DEVCFG_DMARST 29 92 + #define 
UDC_DEVCFG_SET_DESC 18 93 + #define UDC_DEVCFG_CSR_PRG 17 94 + #define UDC_DEVCFG_STATUS 7 95 + #define UDC_DEVCFG_DIR 6 96 + #define UDC_DEVCFG_PI 5 97 + #define UDC_DEVCFG_SS 4 98 + #define UDC_DEVCFG_SP 3 99 + #define UDC_DEVCFG_RWKP 2 100 + 101 + #define UDC_DEVCFG_SPD_MASK 0x3 102 + #define UDC_DEVCFG_SPD_OFS 0 103 + #define UDC_DEVCFG_SPD_HS 0x0 104 + #define UDC_DEVCFG_SPD_FS 0x1 105 + #define UDC_DEVCFG_SPD_LS 0x2 106 + /*#define UDC_DEVCFG_SPD_FS 0x3*/ 107 + 108 + 109 + /* Device Control Register --------------------------------------------------*/ 110 + #define UDC_DEVCTL_ADDR 0x404 111 + 112 + #define UDC_DEVCTL_THLEN_MASK 0xff000000 113 + #define UDC_DEVCTL_THLEN_OFS 24 114 + 115 + #define UDC_DEVCTL_BRLEN_MASK 0x00ff0000 116 + #define UDC_DEVCTL_BRLEN_OFS 16 117 + 118 + #define UDC_DEVCTL_CSR_DONE 13 119 + #define UDC_DEVCTL_DEVNAK 12 120 + #define UDC_DEVCTL_SD 10 121 + #define UDC_DEVCTL_MODE 9 122 + #define UDC_DEVCTL_BREN 8 123 + #define UDC_DEVCTL_THE 7 124 + #define UDC_DEVCTL_BF 6 125 + #define UDC_DEVCTL_BE 5 126 + #define UDC_DEVCTL_DU 4 127 + #define UDC_DEVCTL_TDE 3 128 + #define UDC_DEVCTL_RDE 2 129 + #define UDC_DEVCTL_RES 0 130 + 131 + 132 + /* Device Status Register ---------------------------------------------------*/ 133 + #define UDC_DEVSTS_ADDR 0x408 134 + 135 + #define UDC_DEVSTS_TS_MASK 0xfffc0000 136 + #define UDC_DEVSTS_TS_OFS 18 137 + 138 + #define UDC_DEVSTS_SESSVLD 17 139 + #define UDC_DEVSTS_PHY_ERROR 16 140 + #define UDC_DEVSTS_RXFIFO_EMPTY 15 141 + 142 + #define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000 143 + #define UDC_DEVSTS_ENUM_SPEED_OFS 13 144 + #define UDC_DEVSTS_ENUM_SPEED_FULL 1 145 + #define UDC_DEVSTS_ENUM_SPEED_HIGH 0 146 + 147 + #define UDC_DEVSTS_SUSP 12 148 + 149 + #define UDC_DEVSTS_ALT_MASK 0x00000f00 150 + #define UDC_DEVSTS_ALT_OFS 8 151 + 152 + #define UDC_DEVSTS_INTF_MASK 0x000000f0 153 + #define UDC_DEVSTS_INTF_OFS 4 154 + 155 + #define UDC_DEVSTS_CFG_MASK 0x0000000f 156 + #define UDC_DEVSTS_CFG_OFS 0 157 
+ 158 + 159 + /* Device Interrupt Register ------------------------------------------------*/ 160 + #define UDC_DEVINT_ADDR 0x40c 161 + 162 + #define UDC_DEVINT_SVC 7 163 + #define UDC_DEVINT_ENUM 6 164 + #define UDC_DEVINT_SOF 5 165 + #define UDC_DEVINT_US 4 166 + #define UDC_DEVINT_UR 3 167 + #define UDC_DEVINT_ES 2 168 + #define UDC_DEVINT_SI 1 169 + #define UDC_DEVINT_SC 0 170 + 171 + /* Device Interrupt Mask Register -------------------------------------------*/ 172 + #define UDC_DEVINT_MSK_ADDR 0x410 173 + 174 + #define UDC_DEVINT_MSK 0x7f 175 + 176 + /* Endpoint Interrupt Register ----------------------------------------------*/ 177 + #define UDC_EPINT_ADDR 0x414 178 + 179 + #define UDC_EPINT_OUT_MASK 0xffff0000 180 + #define UDC_EPINT_OUT_OFS 16 181 + #define UDC_EPINT_IN_MASK 0x0000ffff 182 + #define UDC_EPINT_IN_OFS 0 183 + 184 + #define UDC_EPINT_IN_EP0 0 185 + #define UDC_EPINT_IN_EP1 1 186 + #define UDC_EPINT_IN_EP2 2 187 + #define UDC_EPINT_IN_EP3 3 188 + #define UDC_EPINT_OUT_EP0 16 189 + #define UDC_EPINT_OUT_EP1 17 190 + #define UDC_EPINT_OUT_EP2 18 191 + #define UDC_EPINT_OUT_EP3 19 192 + 193 + #define UDC_EPINT_EP0_ENABLE_MSK 0x001e001e 194 + 195 + /* Endpoint Interrupt Mask Register -----------------------------------------*/ 196 + #define UDC_EPINT_MSK_ADDR 0x418 197 + 198 + #define UDC_EPINT_OUT_MSK_MASK 0xffff0000 199 + #define UDC_EPINT_OUT_MSK_OFS 16 200 + #define UDC_EPINT_IN_MSK_MASK 0x0000ffff 201 + #define UDC_EPINT_IN_MSK_OFS 0 202 + 203 + #define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff 204 + /* mask non-EP0 endpoints */ 205 + #define UDC_EPDATAINT_MSK_DISABLE 0xfffefffe 206 + /* mask all dev interrupts */ 207 + #define UDC_DEV_MSK_DISABLE 0x7f 208 + 209 + /* Endpoint-specific CSR's --------------------------------------------------*/ 210 + #define UDC_EPREGS_ADDR 0x0 211 + #define UDC_EPIN_REGS_ADDR 0x0 212 + #define UDC_EPOUT_REGS_ADDR 0x200 213 + 214 + #define UDC_EPCTL_ADDR 0x0 215 + 216 + #define UDC_EPCTL_RRDY 9 217 + #define 
UDC_EPCTL_CNAK 8 218 + #define UDC_EPCTL_SNAK 7 219 + #define UDC_EPCTL_NAK 6 220 + 221 + #define UDC_EPCTL_ET_MASK 0x00000030 222 + #define UDC_EPCTL_ET_OFS 4 223 + #define UDC_EPCTL_ET_CONTROL 0 224 + #define UDC_EPCTL_ET_ISO 1 225 + #define UDC_EPCTL_ET_BULK 2 226 + #define UDC_EPCTL_ET_INTERRUPT 3 227 + 228 + #define UDC_EPCTL_P 3 229 + #define UDC_EPCTL_SN 2 230 + #define UDC_EPCTL_F 1 231 + #define UDC_EPCTL_S 0 232 + 233 + /* Endpoint Status Registers ------------------------------------------------*/ 234 + #define UDC_EPSTS_ADDR 0x4 235 + 236 + #define UDC_EPSTS_RX_PKT_SIZE_MASK 0x007ff800 237 + #define UDC_EPSTS_RX_PKT_SIZE_OFS 11 238 + 239 + #define UDC_EPSTS_TDC 10 240 + #define UDC_EPSTS_HE 9 241 + #define UDC_EPSTS_BNA 7 242 + #define UDC_EPSTS_IN 6 243 + 244 + #define UDC_EPSTS_OUT_MASK 0x00000030 245 + #define UDC_EPSTS_OUT_OFS 4 246 + #define UDC_EPSTS_OUT_DATA 1 247 + #define UDC_EPSTS_OUT_DATA_CLEAR 0x10 248 + #define UDC_EPSTS_OUT_SETUP 2 249 + #define UDC_EPSTS_OUT_SETUP_CLEAR 0x20 250 + #define UDC_EPSTS_OUT_CLEAR 0x30 251 + 252 + /* Endpoint Buffer Size IN/ Receive Packet Frame Number OUT Registers ------*/ 253 + #define UDC_EPIN_BUFF_SIZE_ADDR 0x8 254 + #define UDC_EPOUT_FRAME_NUMBER_ADDR 0x8 255 + 256 + #define UDC_EPIN_BUFF_SIZE_MASK 0x0000ffff 257 + #define UDC_EPIN_BUFF_SIZE_OFS 0 258 + /* EP0in txfifo = 128 bytes*/ 259 + #define UDC_EPIN0_BUFF_SIZE 32 260 + /* EP0in fullspeed txfifo = 128 bytes*/ 261 + #define UDC_FS_EPIN0_BUFF_SIZE 32 262 + 263 + /* fifo size mult = fifo size / max packet */ 264 + #define UDC_EPIN_BUFF_SIZE_MULT 2 265 + 266 + /* EPin data fifo size = 1024 bytes DOUBLE BUFFERING */ 267 + #define UDC_EPIN_BUFF_SIZE 256 268 + /* EPin small INT data fifo size = 128 bytes */ 269 + #define UDC_EPIN_SMALLINT_BUFF_SIZE 32 270 + 271 + /* EPin fullspeed data fifo size = 128 bytes DOUBLE BUFFERING */ 272 + #define UDC_FS_EPIN_BUFF_SIZE 32 273 + 274 + #define UDC_EPOUT_FRAME_NUMBER_MASK 0x0000ffff 275 + #define 
UDC_EPOUT_FRAME_NUMBER_OFS 0 276 + 277 + /* Endpoint Buffer Size OUT/Max Packet Size Registers -----------------------*/ 278 + #define UDC_EPOUT_BUFF_SIZE_ADDR 0x0c 279 + #define UDC_EP_MAX_PKT_SIZE_ADDR 0x0c 280 + 281 + #define UDC_EPOUT_BUFF_SIZE_MASK 0xffff0000 282 + #define UDC_EPOUT_BUFF_SIZE_OFS 16 283 + #define UDC_EP_MAX_PKT_SIZE_MASK 0x0000ffff 284 + #define UDC_EP_MAX_PKT_SIZE_OFS 0 285 + /* EP0in max packet size = 64 bytes */ 286 + #define UDC_EP0IN_MAX_PKT_SIZE 64 287 + /* EP0out max packet size = 64 bytes */ 288 + #define UDC_EP0OUT_MAX_PKT_SIZE 64 289 + /* EP0in fullspeed max packet size = 64 bytes */ 290 + #define UDC_FS_EP0IN_MAX_PKT_SIZE 64 291 + /* EP0out fullspeed max packet size = 64 bytes */ 292 + #define UDC_FS_EP0OUT_MAX_PKT_SIZE 64 293 + 294 + /* 295 + * Endpoint dma descriptors ------------------------------------------------ 296 + * 297 + * Setup data, Status dword 298 + */ 299 + #define UDC_DMA_STP_STS_CFG_MASK 0x0fff0000 300 + #define UDC_DMA_STP_STS_CFG_OFS 16 301 + #define UDC_DMA_STP_STS_CFG_ALT_MASK 0x000f0000 302 + #define UDC_DMA_STP_STS_CFG_ALT_OFS 16 303 + #define UDC_DMA_STP_STS_CFG_INTF_MASK 0x00f00000 304 + #define UDC_DMA_STP_STS_CFG_INTF_OFS 20 305 + #define UDC_DMA_STP_STS_CFG_NUM_MASK 0x0f000000 306 + #define UDC_DMA_STP_STS_CFG_NUM_OFS 24 307 + #define UDC_DMA_STP_STS_RX_MASK 0x30000000 308 + #define UDC_DMA_STP_STS_RX_OFS 28 309 + #define UDC_DMA_STP_STS_BS_MASK 0xc0000000 310 + #define UDC_DMA_STP_STS_BS_OFS 30 311 + #define UDC_DMA_STP_STS_BS_HOST_READY 0 312 + #define UDC_DMA_STP_STS_BS_DMA_BUSY 1 313 + #define UDC_DMA_STP_STS_BS_DMA_DONE 2 314 + #define UDC_DMA_STP_STS_BS_HOST_BUSY 3 315 + /* IN data, Status dword */ 316 + #define UDC_DMA_IN_STS_TXBYTES_MASK 0x0000ffff 317 + #define UDC_DMA_IN_STS_TXBYTES_OFS 0 318 + #define UDC_DMA_IN_STS_FRAMENUM_MASK 0x07ff0000 319 + #define UDC_DMA_IN_STS_FRAMENUM_OFS 0 320 + #define UDC_DMA_IN_STS_L 27 321 + #define UDC_DMA_IN_STS_TX_MASK 0x30000000 322 + #define 
UDC_DMA_IN_STS_TX_OFS 28 323 + #define UDC_DMA_IN_STS_BS_MASK 0xc0000000 324 + #define UDC_DMA_IN_STS_BS_OFS 30 325 + #define UDC_DMA_IN_STS_BS_HOST_READY 0 326 + #define UDC_DMA_IN_STS_BS_DMA_BUSY 1 327 + #define UDC_DMA_IN_STS_BS_DMA_DONE 2 328 + #define UDC_DMA_IN_STS_BS_HOST_BUSY 3 329 + /* OUT data, Status dword */ 330 + #define UDC_DMA_OUT_STS_RXBYTES_MASK 0x0000ffff 331 + #define UDC_DMA_OUT_STS_RXBYTES_OFS 0 332 + #define UDC_DMA_OUT_STS_FRAMENUM_MASK 0x07ff0000 333 + #define UDC_DMA_OUT_STS_FRAMENUM_OFS 0 334 + #define UDC_DMA_OUT_STS_L 27 335 + #define UDC_DMA_OUT_STS_RX_MASK 0x30000000 336 + #define UDC_DMA_OUT_STS_RX_OFS 28 337 + #define UDC_DMA_OUT_STS_BS_MASK 0xc0000000 338 + #define UDC_DMA_OUT_STS_BS_OFS 30 339 + #define UDC_DMA_OUT_STS_BS_HOST_READY 0 340 + #define UDC_DMA_OUT_STS_BS_DMA_BUSY 1 341 + #define UDC_DMA_OUT_STS_BS_DMA_DONE 2 342 + #define UDC_DMA_OUT_STS_BS_HOST_BUSY 3 343 + /* max ep0in packet */ 344 + #define UDC_EP0IN_MAXPACKET 1000 345 + /* max dma packet */ 346 + #define UDC_DMA_MAXPACKET 65536 347 + 348 + /* un-usable DMA address */ 349 + #define DMA_DONT_USE (~(dma_addr_t) 0 ) 350 + 351 + /* other Endpoint register addresses and values-----------------------------*/ 352 + #define UDC_EP_SUBPTR_ADDR 0x10 353 + #define UDC_EP_DESPTR_ADDR 0x14 354 + #define UDC_EP_WRITE_CONFIRM_ADDR 0x1c 355 + 356 + /* EP number as layouted in AHB space */ 357 + #define UDC_EP_NUM 32 358 + #define UDC_EPIN_NUM 16 359 + #define UDC_EPIN_NUM_USED 5 360 + #define UDC_EPOUT_NUM 16 361 + /* EP number of EP's really used = EP0 + 8 data EP's */ 362 + #define UDC_USED_EP_NUM 9 363 + /* UDC CSR regs are aligned but AHB regs not - offset for OUT EP's */ 364 + #define UDC_CSR_EP_OUT_IX_OFS 12 365 + 366 + #define UDC_EP0OUT_IX 16 367 + #define UDC_EP0IN_IX 0 368 + 369 + /* Rx fifo address and size = 1k -------------------------------------------*/ 370 + #define UDC_RXFIFO_ADDR 0x800 371 + #define UDC_RXFIFO_SIZE 0x400 372 + 373 + /* Tx fifo address and size = 
1.5k -----------------------------------------*/ 374 + #define UDC_TXFIFO_ADDR 0xc00 375 + #define UDC_TXFIFO_SIZE 0x600 376 + 377 + /* default data endpoints --------------------------------------------------*/ 378 + #define UDC_EPIN_STATUS_IX 1 379 + #define UDC_EPIN_IX 2 380 + #define UDC_EPOUT_IX 18 381 + 382 + /* general constants -------------------------------------------------------*/ 383 + #define UDC_DWORD_BYTES 4 384 + #define UDC_BITS_PER_BYTE_SHIFT 3 385 + #define UDC_BYTE_MASK 0xff 386 + #define UDC_BITS_PER_BYTE 8 387 + 388 + /*---------------------------------------------------------------------------*/ 389 + /* UDC CSR's */ 390 + struct udc_csrs { 391 + 392 + /* sca - setup command address */ 393 + u32 sca; 394 + 395 + /* ep ne's */ 396 + u32 ne[UDC_USED_EP_NUM]; 397 + } __attribute__ ((packed)); 398 + 399 + /* AHB subsystem CSR registers */ 400 + struct udc_regs { 401 + 402 + /* device configuration */ 403 + u32 cfg; 404 + 405 + /* device control */ 406 + u32 ctl; 407 + 408 + /* device status */ 409 + u32 sts; 410 + 411 + /* device interrupt */ 412 + u32 irqsts; 413 + 414 + /* device interrupt mask */ 415 + u32 irqmsk; 416 + 417 + /* endpoint interrupt */ 418 + u32 ep_irqsts; 419 + 420 + /* endpoint interrupt mask */ 421 + u32 ep_irqmsk; 422 + } __attribute__ ((packed)); 423 + 424 + /* endpoint specific registers */ 425 + struct udc_ep_regs { 426 + 427 + /* endpoint control */ 428 + u32 ctl; 429 + 430 + /* endpoint status */ 431 + u32 sts; 432 + 433 + /* endpoint buffer size in/ receive packet frame number out */ 434 + u32 bufin_framenum; 435 + 436 + /* endpoint buffer size out/max packet size */ 437 + u32 bufout_maxpkt; 438 + 439 + /* endpoint setup buffer pointer */ 440 + u32 subptr; 441 + 442 + /* endpoint data descriptor pointer */ 443 + u32 desptr; 444 + 445 + /* reserverd */ 446 + u32 reserved; 447 + 448 + /* write/read confirmation */ 449 + u32 confirm; 450 + 451 + } __attribute__ ((packed)); 452 + 453 + /* control data DMA desc */ 454 + 
struct udc_stp_dma { 455 + /* status quadlet */ 456 + u32 status; 457 + /* reserved */ 458 + u32 _reserved; 459 + /* first setup word */ 460 + u32 data12; 461 + /* second setup word */ 462 + u32 data34; 463 + } __attribute__ ((aligned (16))); 464 + 465 + /* normal data DMA desc */ 466 + struct udc_data_dma { 467 + /* status quadlet */ 468 + u32 status; 469 + /* reserved */ 470 + u32 _reserved; 471 + /* buffer pointer */ 472 + u32 bufptr; 473 + /* next descriptor pointer */ 474 + u32 next; 475 + } __attribute__ ((aligned (16))); 476 + 477 + /* request packet */ 478 + struct udc_request { 479 + /* embedded gadget ep */ 480 + struct usb_request req; 481 + 482 + /* flags */ 483 + unsigned dma_going : 1, 484 + dma_mapping : 1, 485 + dma_done : 1; 486 + /* phys. address */ 487 + dma_addr_t td_phys; 488 + /* first dma desc. of chain */ 489 + struct udc_data_dma *td_data; 490 + /* last dma desc. of chain */ 491 + struct udc_data_dma *td_data_last; 492 + struct list_head queue; 493 + 494 + /* chain length */ 495 + unsigned chain_len; 496 + 497 + }; 498 + 499 + /* UDC specific endpoint parameters */ 500 + struct udc_ep { 501 + struct usb_ep ep; 502 + struct udc_ep_regs __iomem *regs; 503 + u32 __iomem *txfifo; 504 + u32 __iomem *dma; 505 + dma_addr_t td_phys; 506 + dma_addr_t td_stp_dma; 507 + struct udc_stp_dma *td_stp; 508 + struct udc_data_dma *td; 509 + /* temp request */ 510 + struct udc_request *req; 511 + unsigned req_used; 512 + unsigned req_completed; 513 + /* dummy DMA desc for BNA dummy */ 514 + struct udc_request *bna_dummy_req; 515 + unsigned bna_occurred; 516 + 517 + /* NAK state */ 518 + unsigned naking; 519 + 520 + struct udc *dev; 521 + 522 + /* queue for requests */ 523 + struct list_head queue; 524 + const struct usb_endpoint_descriptor *desc; 525 + unsigned halted; 526 + unsigned cancel_transfer; 527 + unsigned num : 5, 528 + fifo_depth : 14, 529 + in : 1; 530 + }; 531 + 532 + /* device struct */ 533 + struct udc { 534 + struct usb_gadget gadget; 535 + 
spinlock_t lock; /* protects all state */ 536 + /* all endpoints */ 537 + struct udc_ep ep[UDC_EP_NUM]; 538 + struct usb_gadget_driver *driver; 539 + /* operational flags */ 540 + unsigned active : 1, 541 + stall_ep0in : 1, 542 + waiting_zlp_ack_ep0in : 1, 543 + set_cfg_not_acked : 1, 544 + irq_registered : 1, 545 + data_ep_enabled : 1, 546 + data_ep_queued : 1, 547 + mem_region : 1, 548 + sys_suspended : 1, 549 + connected; 550 + 551 + u16 chiprev; 552 + 553 + /* registers */ 554 + struct pci_dev *pdev; 555 + struct udc_csrs __iomem *csr; 556 + struct udc_regs __iomem *regs; 557 + struct udc_ep_regs __iomem *ep_regs; 558 + u32 __iomem *rxfifo; 559 + u32 __iomem *txfifo; 560 + 561 + /* DMA desc pools */ 562 + struct pci_pool *data_requests; 563 + struct pci_pool *stp_requests; 564 + 565 + /* device data */ 566 + unsigned long phys_addr; 567 + void __iomem *virt_addr; 568 + unsigned irq; 569 + 570 + /* states */ 571 + u16 cur_config; 572 + u16 cur_intf; 573 + u16 cur_alt; 574 + }; 575 + 576 + /* setup request data */ 577 + union udc_setup_data { 578 + u32 data[2]; 579 + struct usb_ctrlrequest request; 580 + }; 581 + 582 + /* 583 + *--------------------------------------------------------------------------- 584 + * SET and GET bitfields in u32 values 585 + * via constants for mask/offset: 586 + * <bit_field_stub_name> is the text between 587 + * UDC_ and _MASK|_OFS of appropiate 588 + * constant 589 + * 590 + * set bitfield value in u32 u32Val 591 + */ 592 + #define AMD_ADDBITS(u32Val, bitfield_val, bitfield_stub_name) \ 593 + (((u32Val) & (((u32) ~((u32) bitfield_stub_name##_MASK)))) \ 594 + | (((bitfield_val) << ((u32) bitfield_stub_name##_OFS)) \ 595 + & ((u32) bitfield_stub_name##_MASK))) 596 + 597 + /* 598 + * set bitfield value in zero-initialized u32 u32Val 599 + * => bitfield bits in u32Val are all zero 600 + */ 601 + #define AMD_INIT_SETBITS(u32Val, bitfield_val, bitfield_stub_name) \ 602 + ((u32Val) \ 603 + | (((bitfield_val) << ((u32) 
bitfield_stub_name##_OFS)) \ 604 + & ((u32) bitfield_stub_name##_MASK))) 605 + 606 + /* get bitfield value from u32 u32Val */ 607 + #define AMD_GETBITS(u32Val, bitfield_stub_name) \ 608 + ((u32Val & ((u32) bitfield_stub_name##_MASK)) \ 609 + >> ((u32) bitfield_stub_name##_OFS)) 610 + 611 + /* SET and GET bits in u32 values ------------------------------------------*/ 612 + #define AMD_BIT(bit_stub_name) (1 << bit_stub_name) 613 + #define AMD_UNMASK_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name)) 614 + #define AMD_CLEAR_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name)) 615 + 616 + /* debug macros ------------------------------------------------------------*/ 617 + 618 + #define DBG(udc , args...) dev_dbg(&(udc)->pdev->dev, args) 619 + 620 + #ifdef UDC_VERBOSE 621 + #define VDBG DBG 622 + #else 623 + #define VDBG(udc , args...) do {} while (0) 624 + #endif 625 + 626 + #endif /* #ifdef AMD5536UDC_H */
+4
drivers/usb/gadget/ether.c
··· 305 305 #define DEV_CONFIG_CDC 306 306 #endif 307 307 308 + #ifdef CONFIG_USB_GADGET_AMD5536UDC 309 + #define DEV_CONFIG_CDC 310 + #endif 311 + 308 312 309 313 /*-------------------------------------------------------------------------*/ 310 314
+9 -1
drivers/usb/gadget/gadget_chips.h
··· 17 17 #define gadget_is_net2280(g) 0 18 18 #endif 19 19 20 + #ifdef CONFIG_USB_GADGET_AMD5536UDC 21 + #define gadget_is_amd5536udc(g) !strcmp("amd5536udc", (g)->name) 22 + #else 23 + #define gadget_is_amd5536udc(g) 0 24 + #endif 25 + 20 26 #ifdef CONFIG_USB_GADGET_DUMMY_HCD 21 27 #define gadget_is_dummy(g) !strcmp("dummy_udc", (g)->name) 22 28 #else ··· 208 202 return 0x18; 209 203 else if (gadget_is_fsl_usb2(gadget)) 210 204 return 0x19; 211 - else if (gadget_is_m66592(gadget)) 205 + else if (gadget_is_amd5536udc(gadget)) 212 206 return 0x20; 207 + else if (gadget_is_m66592(gadget)) 208 + return 0x21; 213 209 return -ENOENT; 214 210 }
+127 -128
drivers/usb/gadget/m66592-udc.c
··· 21 21 */ 22 22 23 23 #include <linux/module.h> 24 - #include <linux/kernel.h> 25 - #include <linux/sched.h> 26 - #include <linux/smp_lock.h> 27 - #include <linux/errno.h> 28 - #include <linux/init.h> 29 - #include <linux/timer.h> 30 - #include <linux/delay.h> 31 - #include <linux/list.h> 32 24 #include <linux/interrupt.h> 25 + #include <linux/delay.h> 26 + #include <linux/io.h> 33 27 #include <linux/platform_device.h> 28 + 34 29 #include <linux/usb/ch9.h> 35 30 #include <linux/usb_gadget.h> 36 31 37 - #include <asm/io.h> 38 - #include <asm/irq.h> 39 - #include <asm/system.h> 40 - 41 32 #include "m66592-udc.h" 42 33 43 - MODULE_DESCRIPTION("M66592 USB gadget driiver"); 34 + 35 + MODULE_DESCRIPTION("M66592 USB gadget driver"); 44 36 MODULE_LICENSE("GPL"); 45 37 MODULE_AUTHOR("Yoshihiro Shimoda"); 46 38 ··· 41 49 /* module parameters */ 42 50 static unsigned short clock = M66592_XTAL24; 43 51 module_param(clock, ushort, 0644); 44 - MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0(default=16384)"); 52 + MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 " 53 + "(default=16384)"); 54 + 45 55 static unsigned short vif = M66592_LDRV; 46 56 module_param(vif, ushort, 0644); 47 - MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)"); 48 - static unsigned short endian = 0; 57 + MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0 (default=32768)"); 58 + 59 + static unsigned short endian; 49 60 module_param(endian, ushort, 0644); 50 - MODULE_PARM_DESC(endian, "data endian: big=256, little=0(default=0)"); 61 + MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)"); 62 + 51 63 static unsigned short irq_sense = M66592_INTL; 52 64 module_param(irq_sense, ushort, 0644); 53 - MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=2, falling edge=0(default=2)"); 65 + MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=2, falling edge=0 " 66 + "(default=2)"); 54 67 55 68 static const char udc_name[] = 
"m66592_udc"; 56 69 static const char *m66592_ep_name[] = { ··· 69 72 gfp_t gfp_flags); 70 73 71 74 static void transfer_complete(struct m66592_ep *ep, 72 - struct m66592_request *req, 73 - int status); 75 + struct m66592_request *req, int status); 76 + 74 77 /*-------------------------------------------------------------------------*/ 75 78 static inline u16 get_usb_speed(struct m66592 *m66592) 76 79 { ··· 78 81 } 79 82 80 83 static void enable_pipe_irq(struct m66592 *m66592, u16 pipenum, 81 - unsigned long reg) 84 + unsigned long reg) 82 85 { 83 86 u16 tmp; 84 87 85 88 tmp = m66592_read(m66592, M66592_INTENB0); 86 89 m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE, 87 - M66592_INTENB0); 90 + M66592_INTENB0); 88 91 m66592_bset(m66592, (1 << pipenum), reg); 89 92 m66592_write(m66592, tmp, M66592_INTENB0); 90 93 } 91 94 92 95 static void disable_pipe_irq(struct m66592 *m66592, u16 pipenum, 93 - unsigned long reg) 96 + unsigned long reg) 94 97 { 95 98 u16 tmp; 96 99 97 100 tmp = m66592_read(m66592, M66592_INTENB0); 98 101 m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE, 99 - M66592_INTENB0); 102 + M66592_INTENB0); 100 103 m66592_bclr(m66592, (1 << pipenum), reg); 101 104 m66592_write(m66592, tmp, M66592_INTENB0); 102 105 } ··· 105 108 { 106 109 m66592_bset(m66592, M66592_CTRE, M66592_INTENB0); 107 110 m66592_bset(m66592, M66592_WDST | M66592_RDST | M66592_CMPL, 108 - M66592_INTENB0); 111 + M66592_INTENB0); 109 112 m66592_bset(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0); 110 113 111 114 m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG); 112 115 } 113 116 114 117 static void m66592_usb_disconnect(struct m66592 *m66592) 118 + __releases(m66592->lock) 119 + __acquires(m66592->lock) 115 120 { 116 121 m66592_bclr(m66592, M66592_CTRE, M66592_INTENB0); 117 122 m66592_bclr(m66592, M66592_WDST | M66592_RDST | M66592_CMPL, 118 - M66592_INTENB0); 123 + M66592_INTENB0); 119 124 m66592_bclr(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0); 
120 125 m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); 121 126 ··· 147 148 } 148 149 149 150 static inline void control_reg_set_pid(struct m66592 *m66592, u16 pipenum, 150 - u16 pid) 151 + u16 pid) 151 152 { 152 153 unsigned long offset; 153 154 ··· 249 250 } 250 251 251 252 static int pipe_buffer_setting(struct m66592 *m66592, 252 - struct m66592_pipe_info *info) 253 + struct m66592_pipe_info *info) 253 254 { 254 255 u16 bufnum = 0, buf_bsize = 0; 255 256 u16 pipecfg = 0; ··· 286 287 } 287 288 if (m66592->bi_bufnum > M66592_MAX_BUFNUM) { 288 289 printk(KERN_ERR "m66592 pipe memory is insufficient(%d)\n", 289 - m66592->bi_bufnum); 290 + m66592->bi_bufnum); 290 291 return -ENOMEM; 291 292 } 292 293 ··· 327 328 m66592->bulk--; 328 329 } else 329 330 printk(KERN_ERR "ep_release: unexpect pipenum (%d)\n", 330 - info->pipe); 331 + info->pipe); 331 332 } 332 333 333 334 static void pipe_initialize(struct m66592_ep *ep) ··· 349 350 } 350 351 351 352 static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep, 352 - const struct usb_endpoint_descriptor *desc, 353 - u16 pipenum, int dma) 353 + const struct usb_endpoint_descriptor *desc, 354 + u16 pipenum, int dma) 354 355 { 355 356 if ((pipenum != 0) && dma) { 356 357 if (m66592->num_dma == 0) { ··· 384 385 385 386 ep->pipectr = get_pipectr_addr(pipenum); 386 387 ep->pipenum = pipenum; 387 - ep->ep.maxpacket = desc->wMaxPacketSize; 388 + ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize); 388 389 m66592->pipenum2ep[pipenum] = ep; 389 390 m66592->epaddr2ep[desc->bEndpointAddress&USB_ENDPOINT_NUMBER_MASK] = ep; 390 391 INIT_LIST_HEAD(&ep->queue); ··· 406 407 } 407 408 408 409 static int alloc_pipe_config(struct m66592_ep *ep, 409 - const struct usb_endpoint_descriptor *desc) 410 + const struct usb_endpoint_descriptor *desc) 410 411 { 411 412 struct m66592 *m66592 = ep->m66592; 412 413 struct m66592_pipe_info info; ··· 418 419 419 420 BUG_ON(ep->pipenum); 420 421 421 - switch(desc->bmAttributes & 
USB_ENDPOINT_XFERTYPE_MASK) { 422 + switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 422 423 case USB_ENDPOINT_XFER_BULK: 423 424 if (m66592->bulk >= M66592_MAX_NUM_BULK) { 424 425 if (m66592->isochronous >= M66592_MAX_NUM_ISOC) { 425 426 printk(KERN_ERR "bulk pipe is insufficient\n"); 426 427 return -ENODEV; 427 428 } else { 428 - info.pipe = M66592_BASE_PIPENUM_ISOC + 429 - m66592->isochronous; 429 + info.pipe = M66592_BASE_PIPENUM_ISOC 430 + + m66592->isochronous; 430 431 counter = &m66592->isochronous; 431 432 } 432 433 } else { ··· 461 462 ep->type = info.type; 462 463 463 464 info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 464 - info.maxpacket = desc->wMaxPacketSize; 465 + info.maxpacket = le16_to_cpu(desc->wMaxPacketSize); 465 466 info.interval = desc->bInterval; 466 467 if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) 467 468 info.dir_in = 1; ··· 524 525 525 526 pipe_change(m66592, ep->pipenum); 526 527 m66592_mdfy(m66592, M66592_ISEL | M66592_PIPE0, 527 - (M66592_ISEL | M66592_CURPIPE), 528 - M66592_CFIFOSEL); 528 + (M66592_ISEL | M66592_CURPIPE), 529 + M66592_CFIFOSEL); 529 530 m66592_write(m66592, M66592_BCLR, ep->fifoctr); 530 531 if (req->req.length == 0) { 531 532 m66592_bset(m66592, M66592_BVAL, ep->fifoctr); ··· 560 561 561 562 if (ep->pipenum == 0) { 562 563 m66592_mdfy(m66592, M66592_PIPE0, 563 - (M66592_ISEL | M66592_CURPIPE), 564 - M66592_CFIFOSEL); 564 + (M66592_ISEL | M66592_CURPIPE), 565 + M66592_CFIFOSEL); 565 566 m66592_write(m66592, M66592_BCLR, ep->fifoctr); 566 567 pipe_start(m66592, pipenum); 567 568 pipe_irq_enable(m66592, pipenum); ··· 571 572 pipe_change(m66592, pipenum); 572 573 m66592_bset(m66592, M66592_TRENB, ep->fifosel); 573 574 m66592_write(m66592, 574 - (req->req.length + ep->ep.maxpacket - 1) / 575 - ep->ep.maxpacket, ep->fifotrn); 575 + (req->req.length + ep->ep.maxpacket - 1) 576 + / ep->ep.maxpacket, 577 + ep->fifotrn); 576 578 } 577 579 pipe_start(m66592, pipenum); /* trigger once */ 578 580 
pipe_irq_enable(m66592, pipenum); ··· 614 614 static void init_controller(struct m66592 *m66592) 615 615 { 616 616 m66592_bset(m66592, (vif & M66592_LDRV) | (endian & M66592_BIGEND), 617 - M66592_PINCFG); 617 + M66592_PINCFG); 618 618 m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ 619 619 m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL, M66592_SYSCFG); 620 620 ··· 634 634 635 635 m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1); 636 636 m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR, 637 - M66592_DMA0CFG); 637 + M66592_DMA0CFG); 638 638 } 639 639 640 640 static void disable_controller(struct m66592 *m66592) ··· 659 659 660 660 /*-------------------------------------------------------------------------*/ 661 661 static void transfer_complete(struct m66592_ep *ep, 662 - struct m66592_request *req, 663 - int status) 662 + struct m66592_request *req, int status) 663 + __releases(m66592->lock) 664 + __acquires(m66592->lock) 664 665 { 665 666 int restart = 0; 666 667 ··· 681 680 if (!list_empty(&ep->queue)) 682 681 restart = 1; 683 682 684 - if (likely(req->req.complete)) 685 - req->req.complete(&ep->ep, &req->req); 683 + spin_unlock(&ep->m66592->lock); 684 + req->req.complete(&ep->ep, &req->req); 685 + spin_lock(&ep->m66592->lock); 686 686 687 687 if (restart) { 688 688 req = list_entry(ep->queue.next, struct m66592_request, queue); ··· 695 693 static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req) 696 694 { 697 695 int i; 698 - volatile u16 tmp; 696 + u16 tmp; 699 697 unsigned bufsize; 700 698 size_t size; 701 699 void *buf; ··· 733 731 req->req.actual += size; 734 732 735 733 /* check transfer finish */ 736 - if ((!req->req.zero && (req->req.actual == req->req.length)) || 737 - (size % ep->ep.maxpacket) || (size == 0)) { 734 + if ((!req->req.zero && (req->req.actual == req->req.length)) 735 + || (size % ep->ep.maxpacket) 736 + || (size == 0)) { 738 737 disable_irq_ready(m66592, pipenum); 739 738 
disable_irq_empty(m66592, pipenum); 740 739 } else { ··· 771 768 /* write fifo */ 772 769 if (req->req.buf) { 773 770 m66592_write_fifo(m66592, ep->fifoaddr, buf, size); 774 - if ((size == 0) || ((size % ep->ep.maxpacket) != 0) || 775 - ((bufsize != ep->ep.maxpacket) && (bufsize > size))) 771 + if ((size == 0) 772 + || ((size % ep->ep.maxpacket) != 0) 773 + || ((bufsize != ep->ep.maxpacket) 774 + && (bufsize > size))) 776 775 m66592_bset(m66592, M66592_BVAL, ep->fifoctr); 777 776 } 778 777 779 778 /* update parameters */ 780 779 req->req.actual += size; 781 780 /* check transfer finish */ 782 - if ((!req->req.zero && (req->req.actual == req->req.length)) || 783 - (size % ep->ep.maxpacket) || (size == 0)) { 781 + if ((!req->req.zero && (req->req.actual == req->req.length)) 782 + || (size % ep->ep.maxpacket) 783 + || (size == 0)) { 784 784 disable_irq_ready(m66592, pipenum); 785 785 enable_irq_empty(m66592, pipenum); 786 786 } else { ··· 827 821 req->req.actual += size; 828 822 829 823 /* check transfer finish */ 830 - if ((!req->req.zero && (req->req.actual == req->req.length)) || 831 - (size % ep->ep.maxpacket) || (size == 0)) { 824 + if ((!req->req.zero && (req->req.actual == req->req.length)) 825 + || (size % ep->ep.maxpacket) 826 + || (size == 0)) { 832 827 pipe_stop(m66592, pipenum); 833 828 pipe_irq_disable(m66592, pipenum); 834 829 finish = 1; ··· 857 850 if ((status & M66592_BRDY0) && (enb & M66592_BRDY0)) { 858 851 m66592_write(m66592, ~M66592_BRDY0, M66592_BRDYSTS); 859 852 m66592_mdfy(m66592, M66592_PIPE0, M66592_CURPIPE, 860 - M66592_CFIFOSEL); 853 + M66592_CFIFOSEL); 861 854 862 855 ep = &m66592->ep[0]; 863 856 req = list_entry(ep->queue.next, struct m66592_request, queue); ··· 916 909 } 917 910 918 911 static void get_status(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) 912 + __releases(m66592->lock) 913 + __acquires(m66592->lock) 919 914 { 920 915 struct m66592_ep *ep; 921 916 u16 pid; 922 917 u16 status = 0; 918 + u16 w_index = 
le16_to_cpu(ctrl->wIndex); 923 919 924 920 switch (ctrl->bRequestType & USB_RECIP_MASK) { 925 921 case USB_RECIP_DEVICE: 926 - status = 1; /* selfpower */ 922 + status = 1 << USB_DEVICE_SELF_POWERED; 927 923 break; 928 924 case USB_RECIP_INTERFACE: 929 925 status = 0; 930 926 break; 931 927 case USB_RECIP_ENDPOINT: 932 - ep = m66592->epaddr2ep[ctrl->wIndex&USB_ENDPOINT_NUMBER_MASK]; 928 + ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK]; 933 929 pid = control_reg_get_pid(m66592, ep->pipenum); 934 930 if (pid == M66592_PID_STALL) 935 - status = 1; 931 + status = 1 << USB_ENDPOINT_HALT; 936 932 else 937 933 status = 0; 938 934 break; ··· 944 934 return; /* exit */ 945 935 } 946 936 947 - *m66592->ep0_buf = status; 948 - m66592->ep0_req->buf = m66592->ep0_buf; 937 + m66592->ep0_data = cpu_to_le16(status); 938 + m66592->ep0_req->buf = &m66592->ep0_data; 949 939 m66592->ep0_req->length = 2; 950 940 /* AV: what happens if we get called again before that gets through? */ 941 + spin_unlock(&m66592->lock); 951 942 m66592_queue(m66592->gadget.ep0, m66592->ep0_req, GFP_KERNEL); 943 + spin_lock(&m66592->lock); 952 944 } 953 945 954 946 static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl) ··· 965 953 case USB_RECIP_ENDPOINT: { 966 954 struct m66592_ep *ep; 967 955 struct m66592_request *req; 956 + u16 w_index = le16_to_cpu(ctrl->wIndex); 968 957 969 - ep = m66592->epaddr2ep[ctrl->wIndex&USB_ENDPOINT_NUMBER_MASK]; 958 + ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK]; 970 959 pipe_stop(m66592, ep->pipenum); 971 960 control_reg_sqclr(m66592, ep->pipenum); 972 961 ··· 1002 989 break; 1003 990 case USB_RECIP_ENDPOINT: { 1004 991 struct m66592_ep *ep; 992 + u16 w_index = le16_to_cpu(ctrl->wIndex); 1005 993 1006 - ep = m66592->epaddr2ep[ctrl->wIndex&USB_ENDPOINT_NUMBER_MASK]; 994 + ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK]; 1007 995 pipe_stall(m66592, ep->pipenum); 1008 996 1009 997 control_end(m66592, 1); ··· 1080 1066 
} 1081 1067 if (m66592->old_dvsq == M66592_DS_CNFG && dvsq != M66592_DS_CNFG) 1082 1068 m66592_update_usb_speed(m66592); 1083 - if ((dvsq == M66592_DS_CNFG || dvsq == M66592_DS_ADDS) && 1084 - m66592->gadget.speed == USB_SPEED_UNKNOWN) 1069 + if ((dvsq == M66592_DS_CNFG || dvsq == M66592_DS_ADDS) 1070 + && m66592->gadget.speed == USB_SPEED_UNKNOWN) 1085 1071 m66592_update_usb_speed(m66592); 1086 1072 1087 1073 m66592->old_dvsq = dvsq; 1088 1074 } 1089 1075 1090 1076 static void irq_control_stage(struct m66592 *m66592) 1077 + __releases(m66592->lock) 1078 + __acquires(m66592->lock) 1091 1079 { 1092 1080 struct usb_ctrlrequest ctrl; 1093 1081 u16 ctsq; ··· 1111 1095 case M66592_CS_WRDS: 1112 1096 case M66592_CS_WRND: 1113 1097 if (setup_packet(m66592, &ctrl)) { 1098 + spin_unlock(&m66592->lock); 1114 1099 if (m66592->driver->setup(&m66592->gadget, &ctrl) < 0) 1115 1100 pipe_stall(m66592, 0); 1101 + spin_lock(&m66592->lock); 1116 1102 } 1117 1103 break; 1118 1104 case M66592_CS_RDSS: ··· 1137 1119 u16 savepipe; 1138 1120 u16 mask0; 1139 1121 1122 + spin_lock(&m66592->lock); 1123 + 1140 1124 intsts0 = m66592_read(m66592, M66592_INTSTS0); 1141 1125 intenb0 = m66592_read(m66592, M66592_INTENB0); 1142 1126 ··· 1154 1134 bempenb = m66592_read(m66592, M66592_BEMPENB); 1155 1135 1156 1136 if (mask0 & M66592_VBINT) { 1157 - m66592_write(m66592, (u16)~M66592_VBINT, 1158 - M66592_INTSTS0); 1137 + m66592_write(m66592, 0xffff & ~M66592_VBINT, 1138 + M66592_INTSTS0); 1159 1139 m66592_start_xclock(m66592); 1160 1140 1161 1141 /* start vbus sampling */ 1162 1142 m66592->old_vbus = m66592_read(m66592, M66592_INTSTS0) 1163 - & M66592_VBSTS; 1143 + & M66592_VBSTS; 1164 1144 m66592->scount = M66592_MAX_SAMPLING; 1165 1145 1166 1146 mod_timer(&m66592->timer, 1167 - jiffies + msecs_to_jiffies(50)); 1147 + jiffies + msecs_to_jiffies(50)); 1168 1148 } 1169 1149 if (intsts0 & M66592_DVSQ) 1170 1150 irq_device_state(m66592); 1171 1151 1172 - if ((intsts0 & M66592_BRDY) && (intenb0 & 
M66592_BRDYE) && 1173 - (brdysts & brdyenb)) { 1152 + if ((intsts0 & M66592_BRDY) && (intenb0 & M66592_BRDYE) 1153 + && (brdysts & brdyenb)) { 1174 1154 irq_pipe_ready(m66592, brdysts, brdyenb); 1175 1155 } 1176 - if ((intsts0 & M66592_BEMP) && (intenb0 & M66592_BEMPE) && 1177 - (bempsts & bempenb)) { 1156 + if ((intsts0 & M66592_BEMP) && (intenb0 & M66592_BEMPE) 1157 + && (bempsts & bempenb)) { 1178 1158 irq_pipe_empty(m66592, bempsts, bempenb); 1179 1159 } 1180 1160 ··· 1184 1164 1185 1165 m66592_write(m66592, savepipe, M66592_CFIFOSEL); 1186 1166 1167 + spin_unlock(&m66592->lock); 1187 1168 return IRQ_HANDLED; 1188 1169 } 1189 1170 ··· 1212 1191 m66592_usb_disconnect(m66592); 1213 1192 } else { 1214 1193 mod_timer(&m66592->timer, 1215 - jiffies + msecs_to_jiffies(50)); 1194 + jiffies + msecs_to_jiffies(50)); 1216 1195 } 1217 1196 } else { 1218 1197 m66592->scount = M66592_MAX_SAMPLING; 1219 1198 m66592->old_vbus = tmp; 1220 1199 mod_timer(&m66592->timer, 1221 - jiffies + msecs_to_jiffies(50)); 1200 + jiffies + msecs_to_jiffies(50)); 1222 1201 } 1223 1202 } 1224 1203 spin_unlock_irqrestore(&m66592->lock, flags); ··· 1356 1335 return ret; 1357 1336 } 1358 1337 1359 - static int m66592_fifo_status(struct usb_ep *_ep) 1360 - { 1361 - return -EOPNOTSUPP; 1362 - } 1363 - 1364 1338 static void m66592_fifo_flush(struct usb_ep *_ep) 1365 1339 { 1366 1340 struct m66592_ep *ep; ··· 1381 1365 .dequeue = m66592_dequeue, 1382 1366 1383 1367 .set_halt = m66592_set_halt, 1384 - .fifo_status = m66592_fifo_status, 1385 1368 .fifo_flush = m66592_fifo_flush, 1386 1369 }; 1387 1370 ··· 1392 1377 struct m66592 *m66592 = the_controller; 1393 1378 int retval; 1394 1379 1395 - if (!driver || 1396 - driver->speed != USB_SPEED_HIGH || 1397 - !driver->bind || 1398 - !driver->unbind || 1399 - !driver->setup) 1380 + if (!driver 1381 + || driver->speed != USB_SPEED_HIGH 1382 + || !driver->bind 1383 + || !driver->setup) 1400 1384 return -EINVAL; 1401 1385 if (!m66592) 1402 1386 return -ENODEV; 
··· 1427 1413 m66592->old_vbus = m66592_read(m66592, 1428 1414 M66592_INTSTS0) & M66592_VBSTS; 1429 1415 m66592->scount = M66592_MAX_SAMPLING; 1430 - mod_timer(&m66592->timer, 1431 - jiffies + msecs_to_jiffies(50)); 1416 + mod_timer(&m66592->timer, jiffies + msecs_to_jiffies(50)); 1432 1417 } 1433 1418 1434 1419 return 0; ··· 1444 1431 { 1445 1432 struct m66592 *m66592 = the_controller; 1446 1433 unsigned long flags; 1434 + 1435 + if (driver != m66592->driver || !driver->unbind) 1436 + return -EINVAL; 1447 1437 1448 1438 spin_lock_irqsave(&m66592->lock, flags); 1449 1439 if (m66592->gadget.speed != USB_SPEED_UNKNOWN) ··· 1477 1461 .get_frame = m66592_get_frame, 1478 1462 }; 1479 1463 1480 - #if defined(CONFIG_PM) 1481 - static int m66592_suspend(struct platform_device *pdev, pm_message_t state) 1482 - { 1483 - pdev->dev.power.power_state = state; 1484 - return 0; 1485 - } 1486 - 1487 - static int m66592_resume(struct platform_device *pdev) 1488 - { 1489 - pdev->dev.power.power_state = PMSG_ON; 1490 - return 0; 1491 - } 1492 - #else /* if defined(CONFIG_PM) */ 1493 - #define m66592_suspend NULL 1494 - #define m66592_resume NULL 1495 - #endif 1496 - 1497 - static int __init_or_module m66592_remove(struct platform_device *pdev) 1464 + static int __exit m66592_remove(struct platform_device *pdev) 1498 1465 { 1499 1466 struct m66592 *m66592 = dev_get_drvdata(&pdev->dev); 1500 1467 1501 1468 del_timer_sync(&m66592->timer); 1502 1469 iounmap(m66592->reg); 1503 1470 free_irq(platform_get_irq(pdev, 0), m66592); 1471 + m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); 1504 1472 kfree(m66592); 1505 1473 return 0; 1506 1474 } 1507 1475 1476 + static void nop_completion(struct usb_ep *ep, struct usb_request *r) 1477 + { 1478 + } 1479 + 1508 1480 #define resource_len(r) (((r)->end - (r)->start) + 1) 1481 + 1509 1482 static int __init m66592_probe(struct platform_device *pdev) 1510 1483 { 1511 - struct resource *res = NULL; 1512 - int irq = -1; 1484 + struct resource *res; 
1485 + int irq; 1513 1486 void __iomem *reg = NULL; 1514 1487 struct m66592 *m66592 = NULL; 1515 1488 int ret = 0; 1516 1489 int i; 1517 1490 1518 1491 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 1519 - (char *)udc_name); 1492 + (char *)udc_name); 1520 1493 if (!res) { 1521 1494 ret = -ENODEV; 1522 1495 printk(KERN_ERR "platform_get_resource_byname error.\n"); ··· 1553 1548 m66592->bi_bufnum = M66592_BASE_BUFNUM; 1554 1549 1555 1550 ret = request_irq(irq, m66592_irq, IRQF_DISABLED | IRQF_SHARED, 1556 - udc_name, m66592); 1551 + udc_name, m66592); 1557 1552 if (ret < 0) { 1558 1553 printk(KERN_ERR "request_irq error (%d)\n", ret); 1559 1554 goto clean_up; ··· 1568 1563 if (i != 0) { 1569 1564 INIT_LIST_HEAD(&m66592->ep[i].ep.ep_list); 1570 1565 list_add_tail(&m66592->ep[i].ep.ep_list, 1571 - &m66592->gadget.ep_list); 1566 + &m66592->gadget.ep_list); 1572 1567 } 1573 1568 ep->m66592 = m66592; 1574 1569 INIT_LIST_HEAD(&ep->queue); ··· 1588 1583 1589 1584 the_controller = m66592; 1590 1585 1591 - /* AV: leaks */ 1592 1586 m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); 1593 1587 if (m66592->ep0_req == NULL) 1594 - goto clean_up; 1595 - /* AV: leaks, and do we really need it separately allocated? 
*/ 1596 - m66592->ep0_buf = kzalloc(2, GFP_KERNEL); 1597 - if (m66592->ep0_buf == NULL) 1598 - goto clean_up; 1588 + goto clean_up2; 1589 + m66592->ep0_req->complete = nop_completion; 1599 1590 1600 1591 init_controller(m66592); 1601 1592 1602 - printk("driver %s, %s\n", udc_name, DRIVER_VERSION); 1593 + dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); 1603 1594 return 0; 1604 1595 1596 + clean_up2: 1597 + free_irq(irq, m66592); 1605 1598 clean_up: 1606 1599 if (m66592) { 1607 1600 if (m66592->ep0_req) ··· 1614 1611 1615 1612 /*-------------------------------------------------------------------------*/ 1616 1613 static struct platform_driver m66592_driver = { 1617 - .probe = m66592_probe, 1618 - .remove = m66592_remove, 1619 - .suspend = m66592_suspend, 1620 - .resume = m66592_resume, 1614 + .remove = __exit_p(m66592_remove), 1621 1615 .driver = { 1622 1616 .name = (char *) udc_name, 1623 1617 }, ··· 1622 1622 1623 1623 static int __init m66592_udc_init(void) 1624 1624 { 1625 - return platform_driver_register(&m66592_driver); 1625 + return platform_driver_probe(&m66592_driver, m66592_probe); 1626 1626 } 1627 1627 module_init(m66592_udc_init); 1628 1628 ··· 1631 1631 platform_driver_unregister(&m66592_driver); 1632 1632 } 1633 1633 module_exit(m66592_udc_cleanup); 1634 -
+304 -306
drivers/usb/gadget/m66592-udc.h
··· 24 24 #define __M66592_UDC_H__ 25 25 26 26 #define M66592_SYSCFG 0x00 27 - #define M66592_XTAL 0xC000 /* b15-14: Crystal selection */ 28 - #define M66592_XTAL48 0x8000 /* 48MHz */ 29 - #define M66592_XTAL24 0x4000 /* 24MHz */ 30 - #define M66592_XTAL12 0x0000 /* 12MHz */ 31 - #define M66592_XCKE 0x2000 /* b13: External clock enable */ 32 - #define M66592_RCKE 0x1000 /* b12: Register clock enable */ 33 - #define M66592_PLLC 0x0800 /* b11: PLL control */ 34 - #define M66592_SCKE 0x0400 /* b10: USB clock enable */ 35 - #define M66592_ATCKM 0x0100 /* b8: Automatic supply functional enable */ 36 - #define M66592_HSE 0x0080 /* b7: Hi-speed enable */ 37 - #define M66592_DCFM 0x0040 /* b6: Controller function select */ 38 - #define M66592_DMRPD 0x0020 /* b5: D- pull down control */ 39 - #define M66592_DPRPU 0x0010 /* b4: D+ pull up control */ 40 - #define M66592_FSRPC 0x0004 /* b2: Full-speed receiver enable */ 41 - #define M66592_PCUT 0x0002 /* b1: Low power sleep enable */ 42 - #define M66592_USBE 0x0001 /* b0: USB module operation enable */ 27 + #define M66592_XTAL 0xC000 /* b15-14: Crystal selection */ 28 + #define M66592_XTAL48 0x8000 /* 48MHz */ 29 + #define M66592_XTAL24 0x4000 /* 24MHz */ 30 + #define M66592_XTAL12 0x0000 /* 12MHz */ 31 + #define M66592_XCKE 0x2000 /* b13: External clock enable */ 32 + #define M66592_RCKE 0x1000 /* b12: Register clock enable */ 33 + #define M66592_PLLC 0x0800 /* b11: PLL control */ 34 + #define M66592_SCKE 0x0400 /* b10: USB clock enable */ 35 + #define M66592_ATCKM 0x0100 /* b8: Automatic clock supply */ 36 + #define M66592_HSE 0x0080 /* b7: Hi-speed enable */ 37 + #define M66592_DCFM 0x0040 /* b6: Controller function select */ 38 + #define M66592_DMRPD 0x0020 /* b5: D- pull down control */ 39 + #define M66592_DPRPU 0x0010 /* b4: D+ pull up control */ 40 + #define M66592_FSRPC 0x0004 /* b2: Full-speed receiver enable */ 41 + #define M66592_PCUT 0x0002 /* b1: Low power sleep enable */ 42 + #define M66592_USBE 0x0001 /* b0: USB 
module operation enable */ 43 43 44 44 #define M66592_SYSSTS 0x02 45 - #define M66592_LNST 0x0003 /* b1-0: D+, D- line status */ 46 - #define M66592_SE1 0x0003 /* SE1 */ 47 - #define M66592_KSTS 0x0002 /* K State */ 48 - #define M66592_JSTS 0x0001 /* J State */ 49 - #define M66592_SE0 0x0000 /* SE0 */ 45 + #define M66592_LNST 0x0003 /* b1-0: D+, D- line status */ 46 + #define M66592_SE1 0x0003 /* SE1 */ 47 + #define M66592_KSTS 0x0002 /* K State */ 48 + #define M66592_JSTS 0x0001 /* J State */ 49 + #define M66592_SE0 0x0000 /* SE0 */ 50 50 51 51 #define M66592_DVSTCTR 0x04 52 - #define M66592_WKUP 0x0100 /* b8: Remote wakeup */ 53 - #define M66592_RWUPE 0x0080 /* b7: Remote wakeup sense */ 54 - #define M66592_USBRST 0x0040 /* b6: USB reset enable */ 55 - #define M66592_RESUME 0x0020 /* b5: Resume enable */ 56 - #define M66592_UACT 0x0010 /* b4: USB bus enable */ 57 - #define M66592_RHST 0x0003 /* b1-0: Reset handshake status */ 58 - #define M66592_HSMODE 0x0003 /* Hi-Speed mode */ 59 - #define M66592_FSMODE 0x0002 /* Full-Speed mode */ 60 - #define M66592_HSPROC 0x0001 /* HS handshake is processing */ 52 + #define M66592_WKUP 0x0100 /* b8: Remote wakeup */ 53 + #define M66592_RWUPE 0x0080 /* b7: Remote wakeup sense */ 54 + #define M66592_USBRST 0x0040 /* b6: USB reset enable */ 55 + #define M66592_RESUME 0x0020 /* b5: Resume enable */ 56 + #define M66592_UACT 0x0010 /* b4: USB bus enable */ 57 + #define M66592_RHST 0x0003 /* b1-0: Reset handshake status */ 58 + #define M66592_HSMODE 0x0003 /* Hi-Speed mode */ 59 + #define M66592_FSMODE 0x0002 /* Full-Speed mode */ 60 + #define M66592_HSPROC 0x0001 /* HS handshake is processing */ 61 61 62 62 #define M66592_TESTMODE 0x06 63 - #define M66592_UTST 0x000F /* b4-0: Test select */ 64 - #define M66592_H_TST_PACKET 0x000C /* HOST TEST Packet */ 65 - #define M66592_H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */ 66 - #define M66592_H_TST_K 0x000A /* HOST TEST K */ 67 - #define M66592_H_TST_J 0x0009 /* HOST TEST J */ 68 - 
#define M66592_H_TST_NORMAL 0x0000 /* HOST Normal Mode */ 69 - #define M66592_P_TST_PACKET 0x0004 /* PERI TEST Packet */ 70 - #define M66592_P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */ 71 - #define M66592_P_TST_K 0x0002 /* PERI TEST K */ 72 - #define M66592_P_TST_J 0x0001 /* PERI TEST J */ 73 - #define M66592_P_TST_NORMAL 0x0000 /* PERI Normal Mode */ 63 + #define M66592_UTST 0x000F /* b4-0: Test select */ 64 + #define M66592_H_TST_PACKET 0x000C /* HOST TEST Packet */ 65 + #define M66592_H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */ 66 + #define M66592_H_TST_K 0x000A /* HOST TEST K */ 67 + #define M66592_H_TST_J 0x0009 /* HOST TEST J */ 68 + #define M66592_H_TST_NORMAL 0x0000 /* HOST Normal Mode */ 69 + #define M66592_P_TST_PACKET 0x0004 /* PERI TEST Packet */ 70 + #define M66592_P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */ 71 + #define M66592_P_TST_K 0x0002 /* PERI TEST K */ 72 + #define M66592_P_TST_J 0x0001 /* PERI TEST J */ 73 + #define M66592_P_TST_NORMAL 0x0000 /* PERI Normal Mode */ 74 74 75 75 #define M66592_PINCFG 0x0A 76 - #define M66592_LDRV 0x8000 /* b15: Drive Current Adjust */ 77 - #define M66592_BIGEND 0x0100 /* b8: Big endian mode */ 76 + #define M66592_LDRV 0x8000 /* b15: Drive Current Adjust */ 77 + #define M66592_BIGEND 0x0100 /* b8: Big endian mode */ 78 78 79 79 #define M66592_DMA0CFG 0x0C 80 80 #define M66592_DMA1CFG 0x0E 81 - #define M66592_DREQA 0x4000 /* b14: Dreq active select */ 82 - #define M66592_BURST 0x2000 /* b13: Burst mode */ 83 - #define M66592_DACKA 0x0400 /* b10: Dack active select */ 84 - #define M66592_DFORM 0x0380 /* b9-7: DMA mode select */ 85 - #define M66592_CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */ 86 - #define M66592_CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */ 87 - #define M66592_CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */ 88 - #define M66592_SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */ 89 - #define M66592_SPLIT_DACK_DSTB 0x0300 /* DACK + DSTB0 mode (SPLIT bus) */ 90 - 
#define M66592_DENDA 0x0040 /* b6: Dend active select */ 91 - #define M66592_PKTM 0x0020 /* b5: Packet mode */ 92 - #define M66592_DENDE 0x0010 /* b4: Dend enable */ 93 - #define M66592_OBUS 0x0004 /* b2: OUTbus mode */ 81 + #define M66592_DREQA 0x4000 /* b14: Dreq active select */ 82 + #define M66592_BURST 0x2000 /* b13: Burst mode */ 83 + #define M66592_DACKA 0x0400 /* b10: Dack active select */ 84 + #define M66592_DFORM 0x0380 /* b9-7: DMA mode select */ 85 + #define M66592_CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */ 86 + #define M66592_CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */ 87 + #define M66592_CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */ 88 + #define M66592_SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */ 89 + #define M66592_SPLIT_DACK_DSTB 0x0300 /* DACK + DSTB0 mode (SPLIT bus) */ 90 + #define M66592_DENDA 0x0040 /* b6: Dend active select */ 91 + #define M66592_PKTM 0x0020 /* b5: Packet mode */ 92 + #define M66592_DENDE 0x0010 /* b4: Dend enable */ 93 + #define M66592_OBUS 0x0004 /* b2: OUTbus mode */ 94 94 95 95 #define M66592_CFIFO 0x10 96 96 #define M66592_D0FIFO 0x14 ··· 99 99 #define M66592_CFIFOSEL 0x1E 100 100 #define M66592_D0FIFOSEL 0x24 101 101 #define M66592_D1FIFOSEL 0x2A 102 - #define M66592_RCNT 0x8000 /* b15: Read count mode */ 103 - #define M66592_REW 0x4000 /* b14: Buffer rewind */ 104 - #define M66592_DCLRM 0x2000 /* b13: DMA buffer clear mode */ 105 - #define M66592_DREQE 0x1000 /* b12: DREQ output enable */ 106 - #define M66592_MBW 0x0400 /* b10: Maximum bit width for FIFO access */ 107 - #define M66592_MBW_8 0x0000 /* 8bit */ 108 - #define M66592_MBW_16 0x0400 /* 16bit */ 109 - #define M66592_TRENB 0x0200 /* b9: Transaction counter enable */ 110 - #define M66592_TRCLR 0x0100 /* b8: Transaction counter clear */ 111 - #define M66592_DEZPM 0x0080 /* b7: Zero-length packet additional mode */ 112 - #define M66592_ISEL 0x0020 /* b5: DCP FIFO port direction select */ 113 - #define M66592_CURPIPE 
0x0007 /* b2-0: PIPE select */ 102 + #define M66592_RCNT 0x8000 /* b15: Read count mode */ 103 + #define M66592_REW 0x4000 /* b14: Buffer rewind */ 104 + #define M66592_DCLRM 0x2000 /* b13: DMA buffer clear mode */ 105 + #define M66592_DREQE 0x1000 /* b12: DREQ output enable */ 106 + #define M66592_MBW 0x0400 /* b10: Maximum bit width for FIFO */ 107 + #define M66592_MBW_8 0x0000 /* 8bit */ 108 + #define M66592_MBW_16 0x0400 /* 16bit */ 109 + #define M66592_TRENB 0x0200 /* b9: Transaction counter enable */ 110 + #define M66592_TRCLR 0x0100 /* b8: Transaction counter clear */ 111 + #define M66592_DEZPM 0x0080 /* b7: Zero-length packet mode */ 112 + #define M66592_ISEL 0x0020 /* b5: DCP FIFO port direction select */ 113 + #define M66592_CURPIPE 0x0007 /* b2-0: PIPE select */ 114 114 115 115 #define M66592_CFIFOCTR 0x20 116 116 #define M66592_D0FIFOCTR 0x26 117 117 #define M66592_D1FIFOCTR 0x2c 118 - #define M66592_BVAL 0x8000 /* b15: Buffer valid flag */ 119 - #define M66592_BCLR 0x4000 /* b14: Buffer clear */ 120 - #define M66592_FRDY 0x2000 /* b13: FIFO ready */ 121 - #define M66592_DTLN 0x0FFF /* b11-0: FIFO received data length */ 118 + #define M66592_BVAL 0x8000 /* b15: Buffer valid flag */ 119 + #define M66592_BCLR 0x4000 /* b14: Buffer clear */ 120 + #define M66592_FRDY 0x2000 /* b13: FIFO ready */ 121 + #define M66592_DTLN 0x0FFF /* b11-0: FIFO received data length */ 122 122 123 123 #define M66592_CFIFOSIE 0x22 124 - #define M66592_TGL 0x8000 /* b15: Buffer toggle */ 125 - #define M66592_SCLR 0x4000 /* b14: Buffer clear */ 126 - #define M66592_SBUSY 0x2000 /* b13: SIE_FIFO busy */ 124 + #define M66592_TGL 0x8000 /* b15: Buffer toggle */ 125 + #define M66592_SCLR 0x4000 /* b14: Buffer clear */ 126 + #define M66592_SBUSY 0x2000 /* b13: SIE_FIFO busy */ 127 127 128 128 #define M66592_D0FIFOTRN 0x28 129 129 #define M66592_D1FIFOTRN 0x2E 130 - #define M66592_TRNCNT 0xFFFF /* b15-0: Transaction counter */ 130 + #define M66592_TRNCNT 0xFFFF /* b15-0: Transaction 
counter */ 131 131 132 132 #define M66592_INTENB0 0x30 133 - #define M66592_VBSE 0x8000 /* b15: VBUS interrupt */ 134 - #define M66592_RSME 0x4000 /* b14: Resume interrupt */ 135 - #define M66592_SOFE 0x2000 /* b13: Frame update interrupt */ 136 - #define M66592_DVSE 0x1000 /* b12: Device state transition interrupt */ 137 - #define M66592_CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ 138 - #define M66592_BEMPE 0x0400 /* b10: Buffer empty interrupt */ 139 - #define M66592_NRDYE 0x0200 /* b9: Buffer not ready interrupt */ 140 - #define M66592_BRDYE 0x0100 /* b8: Buffer ready interrupt */ 141 - #define M66592_URST 0x0080 /* b7: USB reset detected interrupt */ 142 - #define M66592_SADR 0x0040 /* b6: Set address executed interrupt */ 143 - #define M66592_SCFG 0x0020 /* b5: Set configuration executed interrupt */ 144 - #define M66592_SUSP 0x0010 /* b4: Suspend detected interrupt */ 145 - #define M66592_WDST 0x0008 /* b3: Control write data stage completed interrupt */ 146 - #define M66592_RDST 0x0004 /* b2: Control read data stage completed interrupt */ 147 - #define M66592_CMPL 0x0002 /* b1: Control transfer complete interrupt */ 148 - #define M66592_SERR 0x0001 /* b0: Sequence error interrupt */ 133 + #define M66592_VBSE 0x8000 /* b15: VBUS interrupt */ 134 + #define M66592_RSME 0x4000 /* b14: Resume interrupt */ 135 + #define M66592_SOFE 0x2000 /* b13: Frame update interrupt */ 136 + #define M66592_DVSE 0x1000 /* b12: Device state transition interrupt */ 137 + #define M66592_CTRE 0x0800 /* b11: Control transfer stage transition irq */ 138 + #define M66592_BEMPE 0x0400 /* b10: Buffer empty interrupt */ 139 + #define M66592_NRDYE 0x0200 /* b9: Buffer not ready interrupt */ 140 + #define M66592_BRDYE 0x0100 /* b8: Buffer ready interrupt */ 141 + #define M66592_URST 0x0080 /* b7: USB reset detected interrupt */ 142 + #define M66592_SADR 0x0040 /* b6: Set address executed interrupt */ 143 + #define M66592_SCFG 0x0020 /* b5: Set configuration executed 
interrupt */ 144 + #define M66592_SUSP 0x0010 /* b4: Suspend detected interrupt */ 145 + #define M66592_WDST 0x0008 /* b3: Control write data stage completed irq */ 146 + #define M66592_RDST 0x0004 /* b2: Control read data stage completed irq */ 147 + #define M66592_CMPL 0x0002 /* b1: Control transfer complete interrupt */ 148 + #define M66592_SERR 0x0001 /* b0: Sequence error interrupt */ 149 149 150 150 #define M66592_INTENB1 0x32 151 - #define M66592_BCHGE 0x4000 /* b14: USB us chenge interrupt */ 152 - #define M66592_DTCHE 0x1000 /* b12: Detach sense interrupt */ 153 - #define M66592_SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */ 154 - #define M66592_SACKE 0x0010 /* b4: SETUP ACK interrupt */ 155 - #define M66592_BRDYM 0x0004 /* b2: BRDY clear timing */ 156 - #define M66592_INTL 0x0002 /* b1: Interrupt sense select */ 157 - #define M66592_PCSE 0x0001 /* b0: PCUT enable by CS assert */ 151 + #define M66592_BCHGE 0x4000 /* b14: USB us chenge interrupt */ 152 + #define M66592_DTCHE 0x1000 /* b12: Detach sense interrupt */ 153 + #define M66592_SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */ 154 + #define M66592_SACKE 0x0010 /* b4: SETUP ACK interrupt */ 155 + #define M66592_BRDYM 0x0004 /* b2: BRDY clear timing */ 156 + #define M66592_INTL 0x0002 /* b1: Interrupt sense select */ 157 + #define M66592_PCSE 0x0001 /* b0: PCUT enable by CS assert */ 158 158 159 159 #define M66592_BRDYENB 0x36 160 160 #define M66592_BRDYSTS 0x46 161 - #define M66592_BRDY7 0x0080 /* b7: PIPE7 */ 162 - #define M66592_BRDY6 0x0040 /* b6: PIPE6 */ 163 - #define M66592_BRDY5 0x0020 /* b5: PIPE5 */ 164 - #define M66592_BRDY4 0x0010 /* b4: PIPE4 */ 165 - #define M66592_BRDY3 0x0008 /* b3: PIPE3 */ 166 - #define M66592_BRDY2 0x0004 /* b2: PIPE2 */ 167 - #define M66592_BRDY1 0x0002 /* b1: PIPE1 */ 168 - #define M66592_BRDY0 0x0001 /* b1: PIPE0 */ 161 + #define M66592_BRDY7 0x0080 /* b7: PIPE7 */ 162 + #define M66592_BRDY6 0x0040 /* b6: PIPE6 */ 163 + #define M66592_BRDY5 0x0020 /* b5: PIPE5 */ 164 
+ #define M66592_BRDY4 0x0010 /* b4: PIPE4 */ 165 + #define M66592_BRDY3 0x0008 /* b3: PIPE3 */ 166 + #define M66592_BRDY2 0x0004 /* b2: PIPE2 */ 167 + #define M66592_BRDY1 0x0002 /* b1: PIPE1 */ 168 + #define M66592_BRDY0 0x0001 /* b1: PIPE0 */ 169 169 170 170 #define M66592_NRDYENB 0x38 171 171 #define M66592_NRDYSTS 0x48 172 - #define M66592_NRDY7 0x0080 /* b7: PIPE7 */ 173 - #define M66592_NRDY6 0x0040 /* b6: PIPE6 */ 174 - #define M66592_NRDY5 0x0020 /* b5: PIPE5 */ 175 - #define M66592_NRDY4 0x0010 /* b4: PIPE4 */ 176 - #define M66592_NRDY3 0x0008 /* b3: PIPE3 */ 177 - #define M66592_NRDY2 0x0004 /* b2: PIPE2 */ 178 - #define M66592_NRDY1 0x0002 /* b1: PIPE1 */ 179 - #define M66592_NRDY0 0x0001 /* b1: PIPE0 */ 172 + #define M66592_NRDY7 0x0080 /* b7: PIPE7 */ 173 + #define M66592_NRDY6 0x0040 /* b6: PIPE6 */ 174 + #define M66592_NRDY5 0x0020 /* b5: PIPE5 */ 175 + #define M66592_NRDY4 0x0010 /* b4: PIPE4 */ 176 + #define M66592_NRDY3 0x0008 /* b3: PIPE3 */ 177 + #define M66592_NRDY2 0x0004 /* b2: PIPE2 */ 178 + #define M66592_NRDY1 0x0002 /* b1: PIPE1 */ 179 + #define M66592_NRDY0 0x0001 /* b1: PIPE0 */ 180 180 181 181 #define M66592_BEMPENB 0x3A 182 182 #define M66592_BEMPSTS 0x4A 183 - #define M66592_BEMP7 0x0080 /* b7: PIPE7 */ 184 - #define M66592_BEMP6 0x0040 /* b6: PIPE6 */ 185 - #define M66592_BEMP5 0x0020 /* b5: PIPE5 */ 186 - #define M66592_BEMP4 0x0010 /* b4: PIPE4 */ 187 - #define M66592_BEMP3 0x0008 /* b3: PIPE3 */ 188 - #define M66592_BEMP2 0x0004 /* b2: PIPE2 */ 189 - #define M66592_BEMP1 0x0002 /* b1: PIPE1 */ 190 - #define M66592_BEMP0 0x0001 /* b0: PIPE0 */ 183 + #define M66592_BEMP7 0x0080 /* b7: PIPE7 */ 184 + #define M66592_BEMP6 0x0040 /* b6: PIPE6 */ 185 + #define M66592_BEMP5 0x0020 /* b5: PIPE5 */ 186 + #define M66592_BEMP4 0x0010 /* b4: PIPE4 */ 187 + #define M66592_BEMP3 0x0008 /* b3: PIPE3 */ 188 + #define M66592_BEMP2 0x0004 /* b2: PIPE2 */ 189 + #define M66592_BEMP1 0x0002 /* b1: PIPE1 */ 190 + #define M66592_BEMP0 0x0001 /* b0: 
PIPE0 */ 191 191 192 192 #define M66592_SOFCFG 0x3C 193 - #define M66592_SOFM 0x000C /* b3-2: SOF palse mode */ 194 - #define M66592_SOF_125US 0x0008 /* SOF OUT 125us uFrame Signal */ 195 - #define M66592_SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */ 196 - #define M66592_SOF_DISABLE 0x0000 /* SOF OUT Disable */ 193 + #define M66592_SOFM 0x000C /* b3-2: SOF palse mode */ 194 + #define M66592_SOF_125US 0x0008 /* SOF OUT 125us uFrame Signal */ 195 + #define M66592_SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */ 196 + #define M66592_SOF_DISABLE 0x0000 /* SOF OUT Disable */ 197 197 198 198 #define M66592_INTSTS0 0x40 199 - #define M66592_VBINT 0x8000 /* b15: VBUS interrupt */ 200 - #define M66592_RESM 0x4000 /* b14: Resume interrupt */ 201 - #define M66592_SOFR 0x2000 /* b13: SOF frame update interrupt */ 202 - #define M66592_DVST 0x1000 /* b12: Device state transition interrupt */ 203 - #define M66592_CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ 204 - #define M66592_BEMP 0x0400 /* b10: Buffer empty interrupt */ 205 - #define M66592_NRDY 0x0200 /* b9: Buffer not ready interrupt */ 206 - #define M66592_BRDY 0x0100 /* b8: Buffer ready interrupt */ 207 - #define M66592_VBSTS 0x0080 /* b7: VBUS input port */ 208 - #define M66592_DVSQ 0x0070 /* b6-4: Device state */ 209 - #define M66592_DS_SPD_CNFG 0x0070 /* Suspend Configured */ 210 - #define M66592_DS_SPD_ADDR 0x0060 /* Suspend Address */ 211 - #define M66592_DS_SPD_DFLT 0x0050 /* Suspend Default */ 212 - #define M66592_DS_SPD_POWR 0x0040 /* Suspend Powered */ 213 - #define M66592_DS_SUSP 0x0040 /* Suspend */ 214 - #define M66592_DS_CNFG 0x0030 /* Configured */ 215 - #define M66592_DS_ADDS 0x0020 /* Address */ 216 - #define M66592_DS_DFLT 0x0010 /* Default */ 217 - #define M66592_DS_POWR 0x0000 /* Powered */ 218 - #define M66592_DVSQS 0x0030 /* b5-4: Device state */ 219 - #define M66592_VALID 0x0008 /* b3: Setup packet detected flag */ 220 - #define M66592_CTSQ 0x0007 /* b2-0: Control transfer stage */ 
221 - #define M66592_CS_SQER 0x0006 /* Sequence error */ 222 - #define M66592_CS_WRND 0x0005 /* Control write nodata status stage */ 223 - #define M66592_CS_WRSS 0x0004 /* Control write status stage */ 224 - #define M66592_CS_WRDS 0x0003 /* Control write data stage */ 225 - #define M66592_CS_RDSS 0x0002 /* Control read status stage */ 226 - #define M66592_CS_RDDS 0x0001 /* Control read data stage */ 227 - #define M66592_CS_IDST 0x0000 /* Idle or setup stage */ 199 + #define M66592_VBINT 0x8000 /* b15: VBUS interrupt */ 200 + #define M66592_RESM 0x4000 /* b14: Resume interrupt */ 201 + #define M66592_SOFR 0x2000 /* b13: SOF frame update interrupt */ 202 + #define M66592_DVST 0x1000 /* b12: Device state transition */ 203 + #define M66592_CTRT 0x0800 /* b11: Control stage transition */ 204 + #define M66592_BEMP 0x0400 /* b10: Buffer empty interrupt */ 205 + #define M66592_NRDY 0x0200 /* b9: Buffer not ready interrupt */ 206 + #define M66592_BRDY 0x0100 /* b8: Buffer ready interrupt */ 207 + #define M66592_VBSTS 0x0080 /* b7: VBUS input port */ 208 + #define M66592_DVSQ 0x0070 /* b6-4: Device state */ 209 + #define M66592_DS_SPD_CNFG 0x0070 /* Suspend Configured */ 210 + #define M66592_DS_SPD_ADDR 0x0060 /* Suspend Address */ 211 + #define M66592_DS_SPD_DFLT 0x0050 /* Suspend Default */ 212 + #define M66592_DS_SPD_POWR 0x0040 /* Suspend Powered */ 213 + #define M66592_DS_SUSP 0x0040 /* Suspend */ 214 + #define M66592_DS_CNFG 0x0030 /* Configured */ 215 + #define M66592_DS_ADDS 0x0020 /* Address */ 216 + #define M66592_DS_DFLT 0x0010 /* Default */ 217 + #define M66592_DS_POWR 0x0000 /* Powered */ 218 + #define M66592_DVSQS 0x0030 /* b5-4: Device state */ 219 + #define M66592_VALID 0x0008 /* b3: Setup packet detected flag */ 220 + #define M66592_CTSQ 0x0007 /* b2-0: Control transfer stage */ 221 + #define M66592_CS_SQER 0x0006 /* Sequence error */ 222 + #define M66592_CS_WRND 0x0005 /* Control write nodata status */ 223 + #define M66592_CS_WRSS 0x0004 /* Control write 
status stage */ 224 + #define M66592_CS_WRDS 0x0003 /* Control write data stage */ 225 + #define M66592_CS_RDSS 0x0002 /* Control read status stage */ 226 + #define M66592_CS_RDDS 0x0001 /* Control read data stage */ 227 + #define M66592_CS_IDST 0x0000 /* Idle or setup stage */ 228 228 229 229 #define M66592_INTSTS1 0x42 230 - #define M66592_BCHG 0x4000 /* b14: USB bus chenge interrupt */ 231 - #define M66592_DTCH 0x1000 /* b12: Detach sense interrupt */ 232 - #define M66592_SIGN 0x0020 /* b5: SETUP IGNORE interrupt */ 233 - #define M66592_SACK 0x0010 /* b4: SETUP ACK interrupt */ 230 + #define M66592_BCHG 0x4000 /* b14: USB bus chenge interrupt */ 231 + #define M66592_DTCH 0x1000 /* b12: Detach sense interrupt */ 232 + #define M66592_SIGN 0x0020 /* b5: SETUP IGNORE interrupt */ 233 + #define M66592_SACK 0x0010 /* b4: SETUP ACK interrupt */ 234 234 235 235 #define M66592_FRMNUM 0x4C 236 - #define M66592_OVRN 0x8000 /* b15: Overrun error */ 237 - #define M66592_CRCE 0x4000 /* b14: Received data error */ 238 - #define M66592_SOFRM 0x0800 /* b11: SOF output mode */ 239 - #define M66592_FRNM 0x07FF /* b10-0: Frame number */ 236 + #define M66592_OVRN 0x8000 /* b15: Overrun error */ 237 + #define M66592_CRCE 0x4000 /* b14: Received data error */ 238 + #define M66592_SOFRM 0x0800 /* b11: SOF output mode */ 239 + #define M66592_FRNM 0x07FF /* b10-0: Frame number */ 240 240 241 241 #define M66592_UFRMNUM 0x4E 242 - #define M66592_UFRNM 0x0007 /* b2-0: Micro frame number */ 242 + #define M66592_UFRNM 0x0007 /* b2-0: Micro frame number */ 243 243 244 244 #define M66592_RECOVER 0x50 245 - #define M66592_STSRECOV 0x0700 /* Status recovery */ 246 - #define M66592_STSR_HI 0x0400 /* FULL(0) or HI(1) Speed */ 247 - #define M66592_STSR_DEFAULT 0x0100 /* Default state */ 248 - #define M66592_STSR_ADDRESS 0x0200 /* Address state */ 249 - #define M66592_STSR_CONFIG 0x0300 /* Configured state */ 250 - #define M66592_USBADDR 0x007F /* b6-0: USB address */ 245 + #define M66592_STSRECOV 
0x0700 /* Status recovery */ 246 + #define M66592_STSR_HI 0x0400 /* FULL(0) or HI(1) Speed */ 247 + #define M66592_STSR_DEFAULT 0x0100 /* Default state */ 248 + #define M66592_STSR_ADDRESS 0x0200 /* Address state */ 249 + #define M66592_STSR_CONFIG 0x0300 /* Configured state */ 250 + #define M66592_USBADDR 0x007F /* b6-0: USB address */ 251 251 252 252 #define M66592_USBREQ 0x54 253 - #define M66592_bRequest 0xFF00 /* b15-8: bRequest */ 254 - #define M66592_GET_STATUS 0x0000 255 - #define M66592_CLEAR_FEATURE 0x0100 256 - #define M66592_ReqRESERVED 0x0200 257 - #define M66592_SET_FEATURE 0x0300 258 - #define M66592_ReqRESERVED1 0x0400 259 - #define M66592_SET_ADDRESS 0x0500 260 - #define M66592_GET_DESCRIPTOR 0x0600 261 - #define M66592_SET_DESCRIPTOR 0x0700 262 - #define M66592_GET_CONFIGURATION 0x0800 263 - #define M66592_SET_CONFIGURATION 0x0900 264 - #define M66592_GET_INTERFACE 0x0A00 265 - #define M66592_SET_INTERFACE 0x0B00 266 - #define M66592_SYNCH_FRAME 0x0C00 267 - #define M66592_bmRequestType 0x00FF /* b7-0: bmRequestType */ 268 - #define M66592_bmRequestTypeDir 0x0080 /* b7 : Data transfer direction */ 269 - #define M66592_HOST_TO_DEVICE 0x0000 270 - #define M66592_DEVICE_TO_HOST 0x0080 271 - #define M66592_bmRequestTypeType 0x0060 /* b6-5: Type */ 272 - #define M66592_STANDARD 0x0000 273 - #define M66592_CLASS 0x0020 274 - #define M66592_VENDOR 0x0040 275 - #define M66592_bmRequestTypeRecip 0x001F /* b4-0: Recipient */ 276 - #define M66592_DEVICE 0x0000 277 - #define M66592_INTERFACE 0x0001 278 - #define M66592_ENDPOINT 0x0002 253 + #define M66592_bRequest 0xFF00 /* b15-8: bRequest */ 254 + #define M66592_GET_STATUS 0x0000 255 + #define M66592_CLEAR_FEATURE 0x0100 256 + #define M66592_ReqRESERVED 0x0200 257 + #define M66592_SET_FEATURE 0x0300 258 + #define M66592_ReqRESERVED1 0x0400 259 + #define M66592_SET_ADDRESS 0x0500 260 + #define M66592_GET_DESCRIPTOR 0x0600 261 + #define M66592_SET_DESCRIPTOR 0x0700 262 + #define M66592_GET_CONFIGURATION 0x0800 
263 + #define M66592_SET_CONFIGURATION 0x0900 264 + #define M66592_GET_INTERFACE 0x0A00 265 + #define M66592_SET_INTERFACE 0x0B00 266 + #define M66592_SYNCH_FRAME 0x0C00 267 + #define M66592_bmRequestType 0x00FF /* b7-0: bmRequestType */ 268 + #define M66592_bmRequestTypeDir 0x0080 /* b7 : Data direction */ 269 + #define M66592_HOST_TO_DEVICE 0x0000 270 + #define M66592_DEVICE_TO_HOST 0x0080 271 + #define M66592_bmRequestTypeType 0x0060 /* b6-5: Type */ 272 + #define M66592_STANDARD 0x0000 273 + #define M66592_CLASS 0x0020 274 + #define M66592_VENDOR 0x0040 275 + #define M66592_bmRequestTypeRecip 0x001F /* b4-0: Recipient */ 276 + #define M66592_DEVICE 0x0000 277 + #define M66592_INTERFACE 0x0001 278 + #define M66592_ENDPOINT 0x0002 279 279 280 280 #define M66592_USBVAL 0x56 281 - #define M66592_wValue 0xFFFF /* b15-0: wValue */ 281 + #define M66592_wValue 0xFFFF /* b15-0: wValue */ 282 282 /* Standard Feature Selector */ 283 - #define M66592_ENDPOINT_HALT 0x0000 284 - #define M66592_DEVICE_REMOTE_WAKEUP 0x0001 285 - #define M66592_TEST_MODE 0x0002 283 + #define M66592_ENDPOINT_HALT 0x0000 284 + #define M66592_DEVICE_REMOTE_WAKEUP 0x0001 285 + #define M66592_TEST_MODE 0x0002 286 286 /* Descriptor Types */ 287 - #define M66592_DT_TYPE 0xFF00 288 - #define M66592_GET_DT_TYPE(v) (((v) & DT_TYPE) >> 8) 289 - #define M66592_DT_DEVICE 0x01 290 - #define M66592_DT_CONFIGURATION 0x02 291 - #define M66592_DT_STRING 0x03 292 - #define M66592_DT_INTERFACE 0x04 293 - #define M66592_DT_ENDPOINT 0x05 294 - #define M66592_DT_DEVICE_QUALIFIER 0x06 295 - #define M66592_DT_OTHER_SPEED_CONFIGURATION 0x07 296 - #define M66592_DT_INTERFACE_POWER 0x08 297 - #define M66592_DT_INDEX 0x00FF 298 - #define M66592_CONF_NUM 0x00FF 299 - #define M66592_ALT_SET 0x00FF 287 + #define M66592_DT_TYPE 0xFF00 288 + #define M66592_GET_DT_TYPE(v) (((v) & DT_TYPE) >> 8) 289 + #define M66592_DT_DEVICE 0x01 290 + #define M66592_DT_CONFIGURATION 0x02 291 + #define M66592_DT_STRING 0x03 292 + #define 
M66592_DT_INTERFACE 0x04 293 + #define M66592_DT_ENDPOINT 0x05 294 + #define M66592_DT_DEVICE_QUALIFIER 0x06 295 + #define M66592_DT_OTHER_SPEED_CONFIGURATION 0x07 296 + #define M66592_DT_INTERFACE_POWER 0x08 297 + #define M66592_DT_INDEX 0x00FF 298 + #define M66592_CONF_NUM 0x00FF 299 + #define M66592_ALT_SET 0x00FF 300 300 301 301 #define M66592_USBINDEX 0x58 302 - #define M66592_wIndex 0xFFFF /* b15-0: wIndex */ 303 - #define M66592_TEST_SELECT 0xFF00 /* b15-b8: Test Mode Selectors */ 304 - #define M66592_TEST_J 0x0100 /* Test_J */ 305 - #define M66592_TEST_K 0x0200 /* Test_K */ 306 - #define M66592_TEST_SE0_NAK 0x0300 /* Test_SE0_NAK */ 307 - #define M66592_TEST_PACKET 0x0400 /* Test_Packet */ 308 - #define M66592_TEST_FORCE_ENABLE 0x0500 /* Test_Force_Enable */ 309 - #define M66592_TEST_STSelectors 0x0600 /* Standard test selectors */ 310 - #define M66592_TEST_Reserved 0x4000 /* Reserved */ 311 - #define M66592_TEST_VSTModes 0xC000 /* Vendor-specific test modes */ 312 - #define M66592_EP_DIR 0x0080 /* b7: Endpoint Direction */ 313 - #define M66592_EP_DIR_IN 0x0080 314 - #define M66592_EP_DIR_OUT 0x0000 302 + #define M66592_wIndex 0xFFFF /* b15-0: wIndex */ 303 + #define M66592_TEST_SELECT 0xFF00 /* b15-b8: Test Mode */ 304 + #define M66592_TEST_J 0x0100 /* Test_J */ 305 + #define M66592_TEST_K 0x0200 /* Test_K */ 306 + #define M66592_TEST_SE0_NAK 0x0300 /* Test_SE0_NAK */ 307 + #define M66592_TEST_PACKET 0x0400 /* Test_Packet */ 308 + #define M66592_TEST_FORCE_ENABLE 0x0500 /* Test_Force_Enable */ 309 + #define M66592_TEST_STSelectors 0x0600 /* Standard test selectors */ 310 + #define M66592_TEST_Reserved 0x4000 /* Reserved */ 311 + #define M66592_TEST_VSTModes 0xC000 /* Vendor-specific tests */ 312 + #define M66592_EP_DIR 0x0080 /* b7: Endpoint Direction */ 313 + #define M66592_EP_DIR_IN 0x0080 314 + #define M66592_EP_DIR_OUT 0x0000 315 315 316 316 #define M66592_USBLENG 0x5A 317 - #define M66592_wLength 0xFFFF /* b15-0: wLength */ 317 + #define 
M66592_wLength 0xFFFF /* b15-0: wLength */ 318 318 319 319 #define M66592_DCPCFG 0x5C 320 - #define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode select */ 321 - #define M66592_DIR 0x0010 /* b4: Control transfer DIR select */ 320 + #define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode */ 321 + #define M66592_DIR 0x0010 /* b4: Control transfer DIR select */ 322 322 323 323 #define M66592_DCPMAXP 0x5E 324 - #define M66592_DEVSEL 0xC000 /* b15-14: Device address select */ 325 - #define M66592_DEVICE_0 0x0000 /* Device address 0 */ 326 - #define M66592_DEVICE_1 0x4000 /* Device address 1 */ 327 - #define M66592_DEVICE_2 0x8000 /* Device address 2 */ 328 - #define M66592_DEVICE_3 0xC000 /* Device address 3 */ 329 - #define M66592_MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ 324 + #define M66592_DEVSEL 0xC000 /* b15-14: Device address select */ 325 + #define M66592_DEVICE_0 0x0000 /* Device address 0 */ 326 + #define M66592_DEVICE_1 0x4000 /* Device address 1 */ 327 + #define M66592_DEVICE_2 0x8000 /* Device address 2 */ 328 + #define M66592_DEVICE_3 0xC000 /* Device address 3 */ 329 + #define M66592_MAXP 0x007F /* b6-0: Maxpacket size of ep0 */ 330 330 331 331 #define M66592_DCPCTR 0x60 332 - #define M66592_BSTS 0x8000 /* b15: Buffer status */ 333 - #define M66592_SUREQ 0x4000 /* b14: Send USB request */ 334 - #define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */ 335 - #define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */ 336 - #define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */ 337 - #define M66592_CCPL 0x0004 /* b2: Enable control transfer complete */ 338 - #define M66592_PID 0x0003 /* b1-0: Response PID */ 339 - #define M66592_PID_STALL 0x0002 /* STALL */ 340 - #define M66592_PID_BUF 0x0001 /* BUF */ 341 - #define M66592_PID_NAK 0x0000 /* NAK */ 332 + #define M66592_BSTS 0x8000 /* b15: Buffer status */ 333 + #define M66592_SUREQ 0x4000 /* b14: Send USB request */ 334 + #define M66592_SQCLR 0x0100 /* b8: 
Sequence toggle bit clear */ 335 + #define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */ 336 + #define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */ 337 + #define M66592_CCPL 0x0004 /* b2: control transfer complete */ 338 + #define M66592_PID 0x0003 /* b1-0: Response PID */ 339 + #define M66592_PID_STALL 0x0002 /* STALL */ 340 + #define M66592_PID_BUF 0x0001 /* BUF */ 341 + #define M66592_PID_NAK 0x0000 /* NAK */ 342 342 343 343 #define M66592_PIPESEL 0x64 344 - #define M66592_PIPENM 0x0007 /* b2-0: Pipe select */ 345 - #define M66592_PIPE0 0x0000 /* PIPE 0 */ 346 - #define M66592_PIPE1 0x0001 /* PIPE 1 */ 347 - #define M66592_PIPE2 0x0002 /* PIPE 2 */ 348 - #define M66592_PIPE3 0x0003 /* PIPE 3 */ 349 - #define M66592_PIPE4 0x0004 /* PIPE 4 */ 350 - #define M66592_PIPE5 0x0005 /* PIPE 5 */ 351 - #define M66592_PIPE6 0x0006 /* PIPE 6 */ 352 - #define M66592_PIPE7 0x0007 /* PIPE 7 */ 344 + #define M66592_PIPENM 0x0007 /* b2-0: Pipe select */ 345 + #define M66592_PIPE0 0x0000 /* PIPE 0 */ 346 + #define M66592_PIPE1 0x0001 /* PIPE 1 */ 347 + #define M66592_PIPE2 0x0002 /* PIPE 2 */ 348 + #define M66592_PIPE3 0x0003 /* PIPE 3 */ 349 + #define M66592_PIPE4 0x0004 /* PIPE 4 */ 350 + #define M66592_PIPE5 0x0005 /* PIPE 5 */ 351 + #define M66592_PIPE6 0x0006 /* PIPE 6 */ 352 + #define M66592_PIPE7 0x0007 /* PIPE 7 */ 353 353 354 354 #define M66592_PIPECFG 0x66 355 - #define M66592_TYP 0xC000 /* b15-14: Transfer type */ 356 - #define M66592_ISO 0xC000 /* Isochronous */ 357 - #define M66592_INT 0x8000 /* Interrupt */ 358 - #define M66592_BULK 0x4000 /* Bulk */ 359 - #define M66592_BFRE 0x0400 /* b10: Buffer ready interrupt mode select */ 360 - #define M66592_DBLB 0x0200 /* b9: Double buffer mode select */ 361 - #define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode select */ 362 - #define M66592_SHTNAK 0x0080 /* b7: Transfer end NAK */ 363 - #define M66592_DIR 0x0010 /* b4: Transfer direction select */ 364 - #define M66592_DIR_H_OUT 0x0010 /* HOST 
OUT */ 365 - #define M66592_DIR_P_IN 0x0010 /* PERI IN */ 366 - #define M66592_DIR_H_IN 0x0000 /* HOST IN */ 367 - #define M66592_DIR_P_OUT 0x0000 /* PERI OUT */ 368 - #define M66592_EPNUM 0x000F /* b3-0: Eendpoint number select */ 369 - #define M66592_EP1 0x0001 370 - #define M66592_EP2 0x0002 371 - #define M66592_EP3 0x0003 372 - #define M66592_EP4 0x0004 373 - #define M66592_EP5 0x0005 374 - #define M66592_EP6 0x0006 375 - #define M66592_EP7 0x0007 376 - #define M66592_EP8 0x0008 377 - #define M66592_EP9 0x0009 378 - #define M66592_EP10 0x000A 379 - #define M66592_EP11 0x000B 380 - #define M66592_EP12 0x000C 381 - #define M66592_EP13 0x000D 382 - #define M66592_EP14 0x000E 383 - #define M66592_EP15 0x000F 355 + #define M66592_TYP 0xC000 /* b15-14: Transfer type */ 356 + #define M66592_ISO 0xC000 /* Isochronous */ 357 + #define M66592_INT 0x8000 /* Interrupt */ 358 + #define M66592_BULK 0x4000 /* Bulk */ 359 + #define M66592_BFRE 0x0400 /* b10: Buffer ready interrupt mode */ 360 + #define M66592_DBLB 0x0200 /* b9: Double buffer mode select */ 361 + #define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode */ 362 + #define M66592_SHTNAK 0x0080 /* b7: Transfer end NAK */ 363 + #define M66592_DIR 0x0010 /* b4: Transfer direction select */ 364 + #define M66592_DIR_H_OUT 0x0010 /* HOST OUT */ 365 + #define M66592_DIR_P_IN 0x0010 /* PERI IN */ 366 + #define M66592_DIR_H_IN 0x0000 /* HOST IN */ 367 + #define M66592_DIR_P_OUT 0x0000 /* PERI OUT */ 368 + #define M66592_EPNUM 0x000F /* b3-0: Eendpoint number select */ 369 + #define M66592_EP1 0x0001 370 + #define M66592_EP2 0x0002 371 + #define M66592_EP3 0x0003 372 + #define M66592_EP4 0x0004 373 + #define M66592_EP5 0x0005 374 + #define M66592_EP6 0x0006 375 + #define M66592_EP7 0x0007 376 + #define M66592_EP8 0x0008 377 + #define M66592_EP9 0x0009 378 + #define M66592_EP10 0x000A 379 + #define M66592_EP11 0x000B 380 + #define M66592_EP12 0x000C 381 + #define M66592_EP13 0x000D 382 + #define M66592_EP14 0x000E 383 + 
#define M66592_EP15 0x000F 384 384 385 385 #define M66592_PIPEBUF 0x68 386 - #define M66592_BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */ 387 - #define M66592_BUF_SIZE(x) ((((x) / 64) - 1) << 10) 388 - #define M66592_BUFNMB 0x00FF /* b7-0: Pipe buffer number */ 386 + #define M66592_BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */ 387 + #define M66592_BUF_SIZE(x) ((((x) / 64) - 1) << 10) 388 + #define M66592_BUFNMB 0x00FF /* b7-0: Pipe buffer number */ 389 389 390 390 #define M66592_PIPEMAXP 0x6A 391 - #define M66592_MXPS 0x07FF /* b10-0: Maxpacket size */ 391 + #define M66592_MXPS 0x07FF /* b10-0: Maxpacket size */ 392 392 393 393 #define M66592_PIPEPERI 0x6C 394 - #define M66592_IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ 395 - #define M66592_IITV 0x0007 /* b2-0: Isochronous interval */ 394 + #define M66592_IFIS 0x1000 /* b12: ISO in-buffer flush mode */ 395 + #define M66592_IITV 0x0007 /* b2-0: ISO interval */ 396 396 397 397 #define M66592_PIPE1CTR 0x70 398 398 #define M66592_PIPE2CTR 0x72 ··· 401 401 #define M66592_PIPE5CTR 0x78 402 402 #define M66592_PIPE6CTR 0x7A 403 403 #define M66592_PIPE7CTR 0x7C 404 - #define M66592_BSTS 0x8000 /* b15: Buffer status */ 405 - #define M66592_INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */ 406 - #define M66592_ACLRM 0x0200 /* b9: Out buffer auto clear mode */ 407 - #define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */ 408 - #define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */ 409 - #define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */ 410 - #define M66592_PID 0x0003 /* b1-0: Response PID */ 404 + #define M66592_BSTS 0x8000 /* b15: Buffer status */ 405 + #define M66592_INBUFM 0x4000 /* b14: IN buffer monitor (PIPE 1-5) */ 406 + #define M66592_ACLRM 0x0200 /* b9: Out buffer auto clear mode */ 407 + #define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */ 408 + #define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */ 409 + #define M66592_SQMON 0x0040 /* 
b6: Sequence toggle bit monitor */ 410 + #define M66592_PID 0x0003 /* b1-0: Response PID */ 411 411 412 412 #define M66592_INVALID_REG 0x7E 413 413 414 - 415 - #define __iomem 416 414 417 415 #define get_pipectr_addr(pipenum) (M66592_PIPE1CTR + (pipenum - 1) * 2) 418 416 ··· 447 449 struct m66592 *m66592; 448 450 449 451 struct list_head queue; 450 - unsigned busy:1; 452 + unsigned busy:1; 451 453 unsigned internal_ccpl:1; /* use only control */ 452 454 453 455 /* this member can able to after m66592_enable */ ··· 475 477 struct m66592_ep *epaddr2ep[16]; 476 478 477 479 struct usb_request *ep0_req; /* for internal request */ 478 - u16 *ep0_buf; /* for internal request */ 480 + u16 ep0_data; /* for internal request */ 479 481 480 482 struct timer_list timer; 481 483 ··· 525 527 } 526 528 527 529 static inline void m66592_read_fifo(struct m66592 *m66592, 528 - unsigned long offset, 529 - void *buf, unsigned long len) 530 + unsigned long offset, 531 + void *buf, unsigned long len) 530 532 { 531 533 unsigned long fifoaddr = (unsigned long)m66592->reg + offset; 532 534 ··· 541 543 } 542 544 543 545 static inline void m66592_write_fifo(struct m66592 *m66592, 544 - unsigned long offset, 545 - void *buf, unsigned long len) 546 + unsigned long offset, 547 + void *buf, unsigned long len) 546 548 { 547 549 unsigned long fifoaddr = (unsigned long)m66592->reg + offset; 548 550 unsigned long odd = len & 0x0001; ··· 556 558 } 557 559 558 560 static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat, 559 - unsigned long offset) 561 + unsigned long offset) 560 562 { 561 563 u16 tmp; 562 564 tmp = m66592_read(m66592, offset);
+13 -12
drivers/usb/gadget/serial.c
··· 33 33 #include <linux/device.h> 34 34 #include <linux/tty.h> 35 35 #include <linux/tty_flip.h> 36 + #include <linux/mutex.h> 36 37 37 38 #include <asm/byteorder.h> 38 39 #include <asm/io.h> ··· 259 258 static const char *EP_OUT_NAME; 260 259 static const char *EP_NOTIFY_NAME; 261 260 262 - static struct semaphore gs_open_close_sem[GS_NUM_PORTS]; 261 + static struct mutex gs_open_close_lock[GS_NUM_PORTS]; 263 262 264 263 static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE; 265 264 static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE; ··· 596 595 tty_set_operations(gs_tty_driver, &gs_tty_ops); 597 596 598 597 for (i=0; i < GS_NUM_PORTS; i++) 599 - sema_init(&gs_open_close_sem[i], 1); 598 + mutex_init(&gs_open_close_lock[i]); 600 599 601 600 retval = tty_register_driver(gs_tty_driver); 602 601 if (retval) { ··· 636 635 struct gs_port *port; 637 636 struct gs_dev *dev; 638 637 struct gs_buf *buf; 639 - struct semaphore *sem; 638 + struct mutex *mtx; 640 639 int ret; 641 640 642 641 port_num = tty->index; ··· 657 656 return -ENODEV; 658 657 } 659 658 660 - sem = &gs_open_close_sem[port_num]; 661 - if (down_interruptible(sem)) { 659 + mtx = &gs_open_close_lock[port_num]; 660 + if (mutex_lock_interruptible(mtx)) { 662 661 printk(KERN_ERR 663 - "gs_open: (%d,%p,%p) interrupted waiting for semaphore\n", 662 + "gs_open: (%d,%p,%p) interrupted waiting for mutex\n", 664 663 port_num, tty, file); 665 664 return -ERESTARTSYS; 666 665 } ··· 755 754 756 755 exit_unlock_port: 757 756 spin_unlock_irqrestore(&port->port_lock, flags); 758 - up(sem); 757 + mutex_unlock(mtx); 759 758 return ret; 760 759 761 760 exit_unlock_dev: 762 761 spin_unlock_irqrestore(&dev->dev_lock, flags); 763 - up(sem); 762 + mutex_unlock(mtx); 764 763 return ret; 765 764 766 765 } ··· 782 781 static void gs_close(struct tty_struct *tty, struct file *file) 783 782 { 784 783 struct gs_port *port = tty->driver_data; 785 - struct semaphore *sem; 784 + struct mutex *mtx; 786 785 787 786 if (port == 
NULL) { 788 787 printk(KERN_ERR "gs_close: NULL port pointer\n"); ··· 791 790 792 791 gs_debug("gs_close: (%d,%p,%p)\n", port->port_num, tty, file); 793 792 794 - sem = &gs_open_close_sem[port->port_num]; 795 - down(sem); 793 + mtx = &gs_open_close_lock[port->port_num]; 794 + mutex_lock(mtx); 796 795 797 796 spin_lock_irq(&port->port_lock); 798 797 ··· 847 846 848 847 exit: 849 848 spin_unlock_irq(&port->port_lock); 850 - up(sem); 849 + mutex_unlock(mtx); 851 850 } 852 851 853 852 /*
+142 -157
drivers/usb/host/isp116x-hcd.c
··· 228 228 struct urb, urb_list); 229 229 ptd = &ep->ptd; 230 230 len = ep->length; 231 - spin_lock(&urb->lock); 232 231 ep->data = (unsigned char *)urb->transfer_buffer 233 232 + urb->actual_length; 234 233 ··· 263 264 | PTD_EP(ep->epnum); 264 265 ptd->len = PTD_LEN(len) | PTD_DIR(dir); 265 266 ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe)); 266 - spin_unlock(&urb->lock); 267 267 if (!ep->active) { 268 268 ptd->mps |= PTD_LAST_MSK; 269 269 isp116x->atl_last_dir = dir; 270 270 } 271 271 isp116x->atl_bufshrt = sizeof(struct ptd) + isp116x->atl_buflen; 272 272 isp116x->atl_buflen = isp116x->atl_bufshrt + ALIGN(len, 4); 273 - } 274 - } 275 - 276 - /* 277 - Analyze transfer results, handle partial transfers and errors 278 - */ 279 - static void postproc_atl_queue(struct isp116x *isp116x) 280 - { 281 - struct isp116x_ep *ep; 282 - struct urb *urb; 283 - struct usb_device *udev; 284 - struct ptd *ptd; 285 - int short_not_ok; 286 - u8 cc; 287 - 288 - for (ep = isp116x->atl_active; ep; ep = ep->active) { 289 - BUG_ON(list_empty(&ep->hep->urb_list)); 290 - urb = 291 - container_of(ep->hep->urb_list.next, struct urb, urb_list); 292 - udev = urb->dev; 293 - ptd = &ep->ptd; 294 - cc = PTD_GET_CC(ptd); 295 - short_not_ok = 1; 296 - spin_lock(&urb->lock); 297 - 298 - /* Data underrun is special. For allowed underrun 299 - we clear the error and continue as normal. For 300 - forbidden underrun we finish the DATA stage 301 - immediately while for control transfer, 302 - we do a STATUS stage. 
*/ 303 - if (cc == TD_DATAUNDERRUN) { 304 - if (!(urb->transfer_flags & URB_SHORT_NOT_OK)) { 305 - DBG("Allowed data underrun\n"); 306 - cc = TD_CC_NOERROR; 307 - short_not_ok = 0; 308 - } else { 309 - ep->error_count = 1; 310 - if (usb_pipecontrol(urb->pipe)) 311 - ep->nextpid = USB_PID_ACK; 312 - else 313 - usb_settoggle(udev, ep->epnum, 314 - ep->nextpid == 315 - USB_PID_OUT, 316 - PTD_GET_TOGGLE(ptd)); 317 - urb->actual_length += PTD_GET_COUNT(ptd); 318 - urb->status = cc_to_error[TD_DATAUNDERRUN]; 319 - spin_unlock(&urb->lock); 320 - continue; 321 - } 322 - } 323 - /* Keep underrun error through the STATUS stage */ 324 - if (urb->status == cc_to_error[TD_DATAUNDERRUN]) 325 - cc = TD_DATAUNDERRUN; 326 - 327 - if (cc != TD_CC_NOERROR && cc != TD_NOTACCESSED 328 - && (++ep->error_count >= 3 || cc == TD_CC_STALL 329 - || cc == TD_DATAOVERRUN)) { 330 - if (urb->status == -EINPROGRESS) 331 - urb->status = cc_to_error[cc]; 332 - if (ep->nextpid == USB_PID_ACK) 333 - ep->nextpid = 0; 334 - spin_unlock(&urb->lock); 335 - continue; 336 - } 337 - /* According to usb spec, zero-length Int transfer signals 338 - finishing of the urb. Hey, does this apply only 339 - for IN endpoints? 
*/ 340 - if (usb_pipeint(urb->pipe) && !PTD_GET_LEN(ptd)) { 341 - if (urb->status == -EINPROGRESS) 342 - urb->status = 0; 343 - spin_unlock(&urb->lock); 344 - continue; 345 - } 346 - 347 - /* Relax after previously failed, but later succeeded 348 - or correctly NAK'ed retransmission attempt */ 349 - if (ep->error_count 350 - && (cc == TD_CC_NOERROR || cc == TD_NOTACCESSED)) 351 - ep->error_count = 0; 352 - 353 - /* Take into account idiosyncracies of the isp116x chip 354 - regarding toggle bit for failed transfers */ 355 - if (ep->nextpid == USB_PID_OUT) 356 - usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd) 357 - ^ (ep->error_count > 0)); 358 - else if (ep->nextpid == USB_PID_IN) 359 - usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd) 360 - ^ (ep->error_count > 0)); 361 - 362 - switch (ep->nextpid) { 363 - case USB_PID_IN: 364 - case USB_PID_OUT: 365 - urb->actual_length += PTD_GET_COUNT(ptd); 366 - if (PTD_GET_ACTIVE(ptd) 367 - || (cc != TD_CC_NOERROR && cc < 0x0E)) 368 - break; 369 - if (urb->transfer_buffer_length != urb->actual_length) { 370 - if (short_not_ok) 371 - break; 372 - } else { 373 - if (urb->transfer_flags & URB_ZERO_PACKET 374 - && ep->nextpid == USB_PID_OUT 375 - && !(PTD_GET_COUNT(ptd) % ep->maxpacket)) { 376 - DBG("Zero packet requested\n"); 377 - break; 378 - } 379 - } 380 - /* All data for this URB is transferred, let's finish */ 381 - if (usb_pipecontrol(urb->pipe)) 382 - ep->nextpid = USB_PID_ACK; 383 - else if (urb->status == -EINPROGRESS) 384 - urb->status = 0; 385 - break; 386 - case USB_PID_SETUP: 387 - if (PTD_GET_ACTIVE(ptd) 388 - || (cc != TD_CC_NOERROR && cc < 0x0E)) 389 - break; 390 - if (urb->transfer_buffer_length == urb->actual_length) 391 - ep->nextpid = USB_PID_ACK; 392 - else if (usb_pipeout(urb->pipe)) { 393 - usb_settoggle(udev, 0, 1, 1); 394 - ep->nextpid = USB_PID_OUT; 395 - } else { 396 - usb_settoggle(udev, 0, 0, 1); 397 - ep->nextpid = USB_PID_IN; 398 - } 399 - break; 400 - case USB_PID_ACK: 401 - if 
(PTD_GET_ACTIVE(ptd) 402 - || (cc != TD_CC_NOERROR && cc < 0x0E)) 403 - break; 404 - if (urb->status == -EINPROGRESS) 405 - urb->status = 0; 406 - ep->nextpid = 0; 407 - break; 408 - default: 409 - BUG(); 410 - } 411 - spin_unlock(&urb->lock); 412 273 } 413 274 } 414 275 ··· 324 465 if (!--isp116x->periodic_count) { 325 466 isp116x->irqenb &= ~HCuPINT_SOF; 326 467 isp116x->irqenb |= HCuPINT_ATL; 468 + } 469 + } 470 + 471 + /* 472 + Analyze transfer results, handle partial transfers and errors 473 + */ 474 + static void postproc_atl_queue(struct isp116x *isp116x) 475 + { 476 + struct isp116x_ep *ep; 477 + struct urb *urb; 478 + struct usb_device *udev; 479 + struct ptd *ptd; 480 + int short_not_ok; 481 + int status; 482 + u8 cc; 483 + 484 + for (ep = isp116x->atl_active; ep; ep = ep->active) { 485 + BUG_ON(list_empty(&ep->hep->urb_list)); 486 + urb = 487 + container_of(ep->hep->urb_list.next, struct urb, urb_list); 488 + udev = urb->dev; 489 + ptd = &ep->ptd; 490 + cc = PTD_GET_CC(ptd); 491 + short_not_ok = 1; 492 + status = -EINPROGRESS; 493 + 494 + /* Data underrun is special. For allowed underrun 495 + we clear the error and continue as normal. For 496 + forbidden underrun we finish the DATA stage 497 + immediately while for control transfer, 498 + we do a STATUS stage. 
*/ 499 + if (cc == TD_DATAUNDERRUN) { 500 + if (!(urb->transfer_flags & URB_SHORT_NOT_OK) || 501 + usb_pipecontrol(urb->pipe)) { 502 + DBG("Allowed or control data underrun\n"); 503 + cc = TD_CC_NOERROR; 504 + short_not_ok = 0; 505 + } else { 506 + ep->error_count = 1; 507 + usb_settoggle(udev, ep->epnum, 508 + ep->nextpid == USB_PID_OUT, 509 + PTD_GET_TOGGLE(ptd)); 510 + urb->actual_length += PTD_GET_COUNT(ptd); 511 + status = cc_to_error[TD_DATAUNDERRUN]; 512 + goto done; 513 + } 514 + } 515 + 516 + if (cc != TD_CC_NOERROR && cc != TD_NOTACCESSED 517 + && (++ep->error_count >= 3 || cc == TD_CC_STALL 518 + || cc == TD_DATAOVERRUN)) { 519 + status = cc_to_error[cc]; 520 + if (ep->nextpid == USB_PID_ACK) 521 + ep->nextpid = 0; 522 + goto done; 523 + } 524 + /* According to usb spec, zero-length Int transfer signals 525 + finishing of the urb. Hey, does this apply only 526 + for IN endpoints? */ 527 + if (usb_pipeint(urb->pipe) && !PTD_GET_LEN(ptd)) { 528 + status = 0; 529 + goto done; 530 + } 531 + 532 + /* Relax after previously failed, but later succeeded 533 + or correctly NAK'ed retransmission attempt */ 534 + if (ep->error_count 535 + && (cc == TD_CC_NOERROR || cc == TD_NOTACCESSED)) 536 + ep->error_count = 0; 537 + 538 + /* Take into account idiosyncracies of the isp116x chip 539 + regarding toggle bit for failed transfers */ 540 + if (ep->nextpid == USB_PID_OUT) 541 + usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd) 542 + ^ (ep->error_count > 0)); 543 + else if (ep->nextpid == USB_PID_IN) 544 + usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd) 545 + ^ (ep->error_count > 0)); 546 + 547 + switch (ep->nextpid) { 548 + case USB_PID_IN: 549 + case USB_PID_OUT: 550 + urb->actual_length += PTD_GET_COUNT(ptd); 551 + if (PTD_GET_ACTIVE(ptd) 552 + || (cc != TD_CC_NOERROR && cc < 0x0E)) 553 + break; 554 + if (urb->transfer_buffer_length != urb->actual_length) { 555 + if (short_not_ok) 556 + break; 557 + } else { 558 + if (urb->transfer_flags & URB_ZERO_PACKET 
559 + && ep->nextpid == USB_PID_OUT 560 + && !(PTD_GET_COUNT(ptd) % ep->maxpacket)) { 561 + DBG("Zero packet requested\n"); 562 + break; 563 + } 564 + } 565 + /* All data for this URB is transferred, let's finish */ 566 + if (usb_pipecontrol(urb->pipe)) 567 + ep->nextpid = USB_PID_ACK; 568 + else 569 + status = 0; 570 + break; 571 + case USB_PID_SETUP: 572 + if (PTD_GET_ACTIVE(ptd) 573 + || (cc != TD_CC_NOERROR && cc < 0x0E)) 574 + break; 575 + if (urb->transfer_buffer_length == urb->actual_length) 576 + ep->nextpid = USB_PID_ACK; 577 + else if (usb_pipeout(urb->pipe)) { 578 + usb_settoggle(udev, 0, 1, 1); 579 + ep->nextpid = USB_PID_OUT; 580 + } else { 581 + usb_settoggle(udev, 0, 0, 1); 582 + ep->nextpid = USB_PID_IN; 583 + } 584 + break; 585 + case USB_PID_ACK: 586 + if (PTD_GET_ACTIVE(ptd) 587 + || (cc != TD_CC_NOERROR && cc < 0x0E)) 588 + break; 589 + if ((urb->transfer_flags & URB_SHORT_NOT_OK) && 590 + urb->actual_length < 591 + urb->transfer_buffer_length) 592 + status = -EREMOTEIO; 593 + else 594 + status = 0; 595 + ep->nextpid = 0; 596 + break; 597 + default: 598 + BUG(); 599 + } 600 + 601 + done: 602 + if (status != -EINPROGRESS) { 603 + spin_lock(&urb->lock); 604 + if (urb->status == -EINPROGRESS) 605 + urb->status = status; 606 + spin_unlock(&urb->lock); 607 + } 608 + if (urb->status != -EINPROGRESS) 609 + finish_request(isp116x, ep, urb); 327 610 } 328 611 } 329 612 ··· 571 570 */ 572 571 static void finish_atl_transfers(struct isp116x *isp116x) 573 572 { 574 - struct isp116x_ep *ep; 575 - struct urb *urb; 576 - 577 573 if (!isp116x->atl_active) 578 574 return; 579 575 /* Fifo not ready? 
*/ ··· 580 582 atomic_inc(&isp116x->atl_finishing); 581 583 unpack_fifo(isp116x); 582 584 postproc_atl_queue(isp116x); 583 - for (ep = isp116x->atl_active; ep; ep = ep->active) { 584 - urb = 585 - container_of(ep->hep->urb_list.next, struct urb, urb_list); 586 - /* USB_PID_ACK check here avoids finishing of 587 - control transfers, for which TD_DATAUNDERRUN 588 - occured, while URB_SHORT_NOT_OK was set */ 589 - if (urb && urb->status != -EINPROGRESS 590 - && ep->nextpid != USB_PID_ACK) 591 - finish_request(isp116x, ep, urb); 592 - } 593 585 atomic_dec(&isp116x->atl_finishing); 594 586 } 595 587 ··· 809 821 } 810 822 811 823 /* in case of unlink-during-submit */ 812 - spin_lock(&urb->lock); 813 824 if (urb->status != -EINPROGRESS) { 814 - spin_unlock(&urb->lock); 815 825 finish_request(isp116x, ep, urb); 816 826 ret = 0; 817 827 goto fail; 818 828 } 819 829 urb->hcpriv = hep; 820 - spin_unlock(&urb->lock); 821 830 start_atl_transfers(isp116x); 822 831 823 832 fail:
+54 -56
drivers/usb/host/r8a66597-hcd.c
··· 35 35 #include <linux/interrupt.h> 36 36 #include <linux/usb.h> 37 37 #include <linux/platform_device.h> 38 - 39 - #include <asm/io.h> 40 - #include <asm/irq.h> 41 - #include <asm/system.h> 38 + #include <linux/io.h> 39 + #include <linux/irq.h> 42 40 43 41 #include "../core/hcd.h" 44 42 #include "r8a66597.h" ··· 52 54 /* module parameters */ 53 55 static unsigned short clock = XTAL12; 54 56 module_param(clock, ushort, 0644); 55 - MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0(default=0)"); 57 + MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 " 58 + "(default=0)"); 59 + 56 60 static unsigned short vif = LDRV; 57 61 module_param(vif, ushort, 0644); 58 62 MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)"); 59 - static unsigned short endian = 0; 63 + 64 + static unsigned short endian; 60 65 module_param(endian, ushort, 0644); 61 - MODULE_PARM_DESC(endian, "data endian: big=256, little=0(default=0)"); 66 + MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)"); 67 + 62 68 static unsigned short irq_sense = INTL; 63 69 module_param(irq_sense, ushort, 0644); 64 - MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0(default=32)"); 70 + MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0 " 71 + "(default=32)"); 65 72 66 73 static void packet_write(struct r8a66597 *r8a66597, u16 pipenum); 67 74 static int r8a66597_get_frame(struct usb_hcd *hcd); ··· 311 308 struct r8a66597_device *dev; 312 309 int usb_address = urb->setup_packet[2]; /* urb->pipe is address 0 */ 313 310 314 - dev = kzalloc(sizeof(struct r8a66597_device), GFP_KERNEL); 311 + dev = kzalloc(sizeof(struct r8a66597_device), GFP_ATOMIC); 315 312 if (dev == NULL) 316 313 return -ENOMEM; 317 314 ··· 614 611 u16 array[R8A66597_MAX_NUM_PIPE], i = 0, min; 615 612 616 613 memset(array, 0, sizeof(array)); 617 - switch(ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 618 - case USB_ENDPOINT_XFER_BULK: 614 
+ switch (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 615 + case USB_ENDPOINT_XFER_BULK: 619 616 if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) 620 617 array[i++] = 4; 621 618 else { 622 619 array[i++] = 3; 623 620 array[i++] = 5; 624 621 } 625 - break; 626 - case USB_ENDPOINT_XFER_INT: 622 + break; 623 + case USB_ENDPOINT_XFER_INT: 627 624 if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) { 628 625 array[i++] = 6; 629 626 array[i++] = 7; 630 627 array[i++] = 8; 631 628 } else 632 629 array[i++] = 9; 633 - break; 634 - case USB_ENDPOINT_XFER_ISOC: 630 + break; 631 + case USB_ENDPOINT_XFER_ISOC: 635 632 if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) 636 633 array[i++] = 2; 637 634 else 638 635 array[i++] = 1; 639 - break; 640 - default: 641 - err("Illegal type"); 642 - return 0; 643 - } 636 + break; 637 + default: 638 + err("Illegal type"); 639 + return 0; 640 + } 644 641 645 642 i = 1; 646 643 min = array[0]; ··· 657 654 { 658 655 u16 r8a66597_type; 659 656 660 - switch(type) { 657 + switch (type) { 661 658 case USB_ENDPOINT_XFER_BULK: 662 659 r8a66597_type = R8A66597_BULK; 663 660 break; ··· 877 874 { 878 875 r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION) 879 876 | (1 << USB_PORT_FEAT_C_CONNECTION); 880 - r8a66597_write(r8a66597, (u16)~DTCH, get_intsts_reg(port)); 877 + r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port)); 881 878 r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port)); 882 879 } 883 880 ··· 920 917 921 918 r8a66597_write(r8a66597, make_devsel(td->address) | td->maxpacket, 922 919 DCPMAXP); 923 - r8a66597_write(r8a66597, (u16)~(SIGN | SACK), INTSTS1); 920 + r8a66597_write(r8a66597, ~(SIGN | SACK), INTSTS1); 924 921 925 922 for (i = 0; i < 4; i++) { 926 923 r8a66597_write(r8a66597, p[i], setup_addr); ··· 951 948 pipe_irq_disable(r8a66597, td->pipenum); 952 949 pipe_setting(r8a66597, td); 953 950 pipe_stop(r8a66597, td->pipe); 954 - r8a66597_write(r8a66597, (u16)~(1 << td->pipenum), 955 - BRDYSTS); 951 + 
r8a66597_write(r8a66597, ~(1 << td->pipenum), BRDYSTS); 956 952 957 953 if (td->pipe->pipetre) { 958 954 r8a66597_write(r8a66597, TRCLR, 959 - td->pipe->pipetre); 955 + td->pipe->pipetre); 960 956 r8a66597_write(r8a66597, 961 - (urb->transfer_buffer_length 962 - + td->maxpacket - 1) 963 - / td->maxpacket, 964 - td->pipe->pipetrn); 957 + (urb->transfer_buffer_length 958 + + td->maxpacket - 1) 959 + / td->maxpacket, 960 + td->pipe->pipetrn); 965 961 r8a66597_bset(r8a66597, TRENB, 966 - td->pipe->pipetre); 962 + td->pipe->pipetre); 967 963 } 968 964 969 965 pipe_start(r8a66597, td->pipe); ··· 993 991 if (td->pipe->pipetre) 994 992 r8a66597_bclr(r8a66597, TRENB, td->pipe->pipetre); 995 993 } 996 - r8a66597_write(r8a66597, (u16)~(1 << td->pipenum), BRDYSTS); 994 + r8a66597_write(r8a66597, ~(1 << td->pipenum), BRDYSTS); 997 995 998 996 fifo_change_from_pipe(r8a66597, td->pipe); 999 997 tmp = r8a66597_read(r8a66597, td->pipe->fifoctr); ··· 1011 1009 struct urb *urb = td->urb; 1012 1010 1013 1011 r8a66597_pipe_toggle(r8a66597, td->pipe, 1); 1012 + pipe_stop(r8a66597, td->pipe); 1014 1013 1015 1014 if (urb->setup_packet[0] & USB_ENDPOINT_DIR_MASK) { 1016 1015 r8a66597_bset(r8a66597, R8A66597_DIR, DCPCFG); 1017 1016 r8a66597_mdfy(r8a66597, ISEL, ISEL | CURPIPE, CFIFOSEL); 1018 1017 r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0); 1019 - r8a66597_write(r8a66597, BVAL | BCLR, CFIFOCTR); 1020 - r8a66597_write(r8a66597, (u16)~BEMP0, BEMPSTS); 1018 + r8a66597_write(r8a66597, ~BEMP0, BEMPSTS); 1019 + r8a66597_write(r8a66597, BCLR, CFIFOCTR); 1020 + r8a66597_write(r8a66597, BVAL, CFIFOCTR); 1021 1021 enable_irq_empty(r8a66597, 0); 1022 1022 } else { 1023 1023 r8a66597_bclr(r8a66597, R8A66597_DIR, DCPCFG); 1024 1024 r8a66597_mdfy(r8a66597, 0, ISEL | CURPIPE, CFIFOSEL); 1025 1025 r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0); 1026 1026 r8a66597_write(r8a66597, BCLR, CFIFOCTR); 1027 - r8a66597_write(r8a66597, (u16)~BRDY0, BRDYSTS); 1028 - r8a66597_write(r8a66597, (u16)~BEMP0, 
BEMPSTS); 1029 1027 enable_irq_ready(r8a66597, 0); 1030 1028 } 1031 1029 enable_irq_nrdy(r8a66597, 0); ··· 1271 1269 1272 1270 /* write fifo */ 1273 1271 if (pipenum > 0) 1274 - r8a66597_write(r8a66597, (u16)~(1 << pipenum), BEMPSTS); 1272 + r8a66597_write(r8a66597, ~(1 << pipenum), BEMPSTS); 1275 1273 if (urb->transfer_buffer) { 1276 1274 r8a66597_write_fifo(r8a66597, td->pipe->fifoaddr, buf, size); 1277 1275 if (!usb_pipebulk(urb->pipe) || td->maxpacket != size) ··· 1364 1362 1365 1363 mask = r8a66597_read(r8a66597, BRDYSTS) 1366 1364 & r8a66597_read(r8a66597, BRDYENB); 1367 - r8a66597_write(r8a66597, (u16)~mask, BRDYSTS); 1365 + r8a66597_write(r8a66597, ~mask, BRDYSTS); 1368 1366 if (mask & BRDY0) { 1369 1367 td = r8a66597_get_td(r8a66597, 0); 1370 1368 if (td && td->type == USB_PID_IN) ··· 1399 1397 1400 1398 mask = r8a66597_read(r8a66597, BEMPSTS) 1401 1399 & r8a66597_read(r8a66597, BEMPENB); 1402 - r8a66597_write(r8a66597, (u16)~mask, BEMPSTS); 1400 + r8a66597_write(r8a66597, ~mask, BEMPSTS); 1403 1401 if (mask & BEMP0) { 1404 1402 cfifo_change(r8a66597, 0); 1405 1403 td = r8a66597_get_td(r8a66597, 0); ··· 1436 1434 1437 1435 mask = r8a66597_read(r8a66597, NRDYSTS) 1438 1436 & r8a66597_read(r8a66597, NRDYENB); 1439 - r8a66597_write(r8a66597, (u16)~mask, NRDYSTS); 1437 + r8a66597_write(r8a66597, ~mask, NRDYSTS); 1440 1438 if (mask & NRDY0) { 1441 1439 cfifo_change(r8a66597, 0); 1442 1440 set_urb_error(r8a66597, 0); ··· 1490 1488 mask0 = intsts0 & intenb0 & (BEMP | NRDY | BRDY); 1491 1489 if (mask2) { 1492 1490 if (mask2 & ATTCH) { 1493 - r8a66597_write(r8a66597, (u16)~ATTCH, INTSTS2); 1491 + r8a66597_write(r8a66597, ~ATTCH, INTSTS2); 1494 1492 r8a66597_bclr(r8a66597, ATTCHE, INTENB2); 1495 1493 1496 1494 /* start usb bus sampling */ 1497 1495 start_root_hub_sampling(r8a66597, 1); 1498 1496 } 1499 1497 if (mask2 & DTCH) { 1500 - r8a66597_write(r8a66597, (u16)~DTCH, INTSTS2); 1498 + r8a66597_write(r8a66597, ~DTCH, INTSTS2); 1501 1499 r8a66597_bclr(r8a66597, 
DTCHE, INTENB2); 1502 1500 r8a66597_usb_disconnect(r8a66597, 1); 1503 1501 } ··· 1505 1503 1506 1504 if (mask1) { 1507 1505 if (mask1 & ATTCH) { 1508 - r8a66597_write(r8a66597, (u16)~ATTCH, INTSTS1); 1506 + r8a66597_write(r8a66597, ~ATTCH, INTSTS1); 1509 1507 r8a66597_bclr(r8a66597, ATTCHE, INTENB1); 1510 1508 1511 1509 /* start usb bus sampling */ 1512 1510 start_root_hub_sampling(r8a66597, 0); 1513 1511 } 1514 1512 if (mask1 & DTCH) { 1515 - r8a66597_write(r8a66597, (u16)~DTCH, INTSTS1); 1513 + r8a66597_write(r8a66597, ~DTCH, INTSTS1); 1516 1514 r8a66597_bclr(r8a66597, DTCHE, INTENB1); 1517 1515 r8a66597_usb_disconnect(r8a66597, 0); 1518 1516 } 1519 1517 if (mask1 & SIGN) { 1520 - r8a66597_write(r8a66597, (u16)~SIGN, INTSTS1); 1518 + r8a66597_write(r8a66597, ~SIGN, INTSTS1); 1521 1519 set_urb_error(r8a66597, 0); 1522 1520 check_next_phase(r8a66597); 1523 1521 } 1524 1522 if (mask1 & SACK) { 1525 - r8a66597_write(r8a66597, (u16)~SACK, INTSTS1); 1523 + r8a66597_write(r8a66597, ~SACK, INTSTS1); 1526 1524 check_next_phase(r8a66597); 1527 1525 } 1528 1526 } ··· 1665 1663 static int r8a66597_start(struct usb_hcd *hcd) 1666 1664 { 1667 1665 struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); 1668 - int ret; 1669 1666 1670 1667 hcd->state = HC_STATE_RUNNING; 1671 - if ((ret = enable_controller(r8a66597)) < 0) 1672 - return ret; 1673 - 1674 - return 0; 1668 + return enable_controller(r8a66597); 1675 1669 } 1676 1670 1677 1671 static void r8a66597_stop(struct usb_hcd *hcd) ··· 1694 1696 1695 1697 static struct r8a66597_td *r8a66597_make_td(struct r8a66597 *r8a66597, 1696 1698 struct urb *urb, 1697 - struct usb_host_endpoint *hep, 1698 - gfp_t mem_flags) 1699 + struct usb_host_endpoint *hep) 1699 1700 { 1700 1701 struct r8a66597_td *td; 1701 1702 u16 pipenum; 1702 1703 1703 - td = kzalloc(sizeof(struct r8a66597_td), mem_flags); 1704 + td = kzalloc(sizeof(struct r8a66597_td), GFP_ATOMIC); 1704 1705 if (td == NULL) 1705 1706 return NULL; 1706 1707 ··· 1738 1741 } 1739 1742 1740 
1743 if (!hep->hcpriv) { 1741 - hep->hcpriv = kzalloc(sizeof(struct r8a66597_pipe), mem_flags); 1744 + hep->hcpriv = kzalloc(sizeof(struct r8a66597_pipe), 1745 + GFP_ATOMIC); 1742 1746 if (!hep->hcpriv) { 1743 1747 ret = -ENOMEM; 1744 1748 goto error; ··· 1753 1755 init_pipe_config(r8a66597, urb); 1754 1756 1755 1757 set_address_zero(r8a66597, urb); 1756 - td = r8a66597_make_td(r8a66597, urb, hep, mem_flags); 1758 + td = r8a66597_make_td(r8a66597, urb, hep); 1757 1759 if (td == NULL) { 1758 1760 ret = -ENOMEM; 1759 1761 goto error;
+42 -45
drivers/usb/host/r8a66597.h
··· 203 203 #define DTLN 0x0FFF /* b11-0: FIFO received data length */ 204 204 205 205 /* Interrupt Enable Register 0 */ 206 - #define VBSE 0x8000 /* b15: VBUS interrupt */ 207 - #define RSME 0x4000 /* b14: Resume interrupt */ 208 - #define SOFE 0x2000 /* b13: Frame update interrupt */ 209 - #define DVSE 0x1000 /* b12: Device state transition interrupt */ 210 - #define CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ 211 - #define BEMPE 0x0400 /* b10: Buffer empty interrupt */ 212 - #define NRDYE 0x0200 /* b9: Buffer not ready interrupt */ 213 - #define BRDYE 0x0100 /* b8: Buffer ready interrupt */ 206 + #define VBSE 0x8000 /* b15: VBUS interrupt */ 207 + #define RSME 0x4000 /* b14: Resume interrupt */ 208 + #define SOFE 0x2000 /* b13: Frame update interrupt */ 209 + #define DVSE 0x1000 /* b12: Device state transition interrupt */ 210 + #define CTRE 0x0800 /* b11: Control transfer stage transition interrupt */ 211 + #define BEMPE 0x0400 /* b10: Buffer empty interrupt */ 212 + #define NRDYE 0x0200 /* b9: Buffer not ready interrupt */ 213 + #define BRDYE 0x0100 /* b8: Buffer ready interrupt */ 214 214 215 215 /* Interrupt Enable Register 1 */ 216 216 #define OVRCRE 0x8000 /* b15: Over-current interrupt */ ··· 268 268 #define SOF_DISABLE 0x0000 /* SOF OUT Disable */ 269 269 270 270 /* Interrupt Status Register 0 */ 271 - #define VBINT 0x8000 /* b15: VBUS interrupt */ 272 - #define RESM 0x4000 /* b14: Resume interrupt */ 273 - #define SOFR 0x2000 /* b13: SOF frame update interrupt */ 274 - #define DVST 0x1000 /* b12: Device state transition interrupt */ 275 - #define CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ 276 - #define BEMP 0x0400 /* b10: Buffer empty interrupt */ 277 - #define NRDY 0x0200 /* b9: Buffer not ready interrupt */ 278 - #define BRDY 0x0100 /* b8: Buffer ready interrupt */ 279 - #define VBSTS 0x0080 /* b7: VBUS input port */ 280 - #define DVSQ 0x0070 /* b6-4: Device state */ 271 + #define VBINT 0x8000 /* b15: 
VBUS interrupt */ 272 + #define RESM 0x4000 /* b14: Resume interrupt */ 273 + #define SOFR 0x2000 /* b13: SOF frame update interrupt */ 274 + #define DVST 0x1000 /* b12: Device state transition interrupt */ 275 + #define CTRT 0x0800 /* b11: Control transfer stage transition interrupt */ 276 + #define BEMP 0x0400 /* b10: Buffer empty interrupt */ 277 + #define NRDY 0x0200 /* b9: Buffer not ready interrupt */ 278 + #define BRDY 0x0100 /* b8: Buffer ready interrupt */ 279 + #define VBSTS 0x0080 /* b7: VBUS input port */ 280 + #define DVSQ 0x0070 /* b6-4: Device state */ 281 281 #define DS_SPD_CNFG 0x0070 /* Suspend Configured */ 282 282 #define DS_SPD_ADDR 0x0060 /* Suspend Address */ 283 283 #define DS_SPD_DFLT 0x0050 /* Suspend Default */ ··· 315 315 /* Micro Frame Number Register */ 316 316 #define UFRNM 0x0007 /* b2-0: Micro frame number */ 317 317 318 - /* USB Address / Low Power Status Recovery Register */ 319 - //#define USBADDR 0x007F /* b6-0: USB address */ 320 - 321 318 /* Default Control Pipe Maxpacket Size Register */ 322 319 /* Pipe Maxpacket Size Register */ 323 - #define DEVSEL 0xF000 /* b15-14: Device address select */ 324 - #define MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ 320 + #define DEVSEL 0xF000 /* b15-14: Device address select */ 321 + #define MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */ 325 322 326 323 /* Default Control Pipe Control Register */ 327 324 #define BSTS 0x8000 /* b15: Buffer status */ ··· 363 366 #define MXPS 0x07FF /* b10-0: Maxpacket size */ 364 367 365 368 /* Pipe Cycle Configuration Register */ 366 - #define IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ 367 - #define IITV 0x0007 /* b2-0: Isochronous interval */ 369 + #define IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */ 370 + #define IITV 0x0007 /* b2-0: Isochronous interval */ 368 371 369 372 /* Pipex Control Register */ 370 - #define BSTS 0x8000 /* b15: Buffer status */ 371 - #define INBUFM 0x4000 /* 
b14: IN buffer monitor (Only for PIPE1 to 5) */ 372 - #define CSCLR 0x2000 /* b13: complete-split status clear */ 373 - #define CSSTS 0x1000 /* b12: complete-split status */ 374 - #define ATREPM 0x0400 /* b10: Auto repeat mode */ 375 - #define ACLRM 0x0200 /* b9: Out buffer auto clear mode */ 376 - #define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ 377 - #define SQSET 0x0080 /* b7: Sequence toggle bit set */ 378 - #define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ 379 - #define PBUSY 0x0020 /* b5: pipe busy */ 380 - #define PID 0x0003 /* b1-0: Response PID */ 373 + #define BSTS 0x8000 /* b15: Buffer status */ 374 + #define INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */ 375 + #define CSCLR 0x2000 /* b13: complete-split status clear */ 376 + #define CSSTS 0x1000 /* b12: complete-split status */ 377 + #define ATREPM 0x0400 /* b10: Auto repeat mode */ 378 + #define ACLRM 0x0200 /* b9: Out buffer auto clear mode */ 379 + #define SQCLR 0x0100 /* b8: Sequence toggle bit clear */ 380 + #define SQSET 0x0080 /* b7: Sequence toggle bit set */ 381 + #define SQMON 0x0040 /* b6: Sequence toggle bit monitor */ 382 + #define PBUSY 0x0020 /* b5: pipe busy */ 383 + #define PID 0x0003 /* b1-0: Response PID */ 381 384 382 385 /* PIPExTRE */ 383 386 #define TRENB 0x0200 /* b9: Transaction counter enable */ ··· 404 407 #define make_devsel(addr) (addr << 12) 405 408 406 409 struct r8a66597_pipe_info { 407 - u16 pipenum; 408 - u16 address; /* R8A66597 HCD usb addres */ 409 - u16 epnum; 410 - u16 maxpacket; 411 - u16 type; 412 - u16 bufnum; 413 - u16 buf_bsize; 414 - u16 interval; 415 - u16 dir_in; 410 + u16 pipenum; 411 + u16 address; /* R8A66597 HCD usb addres */ 412 + u16 epnum; 413 + u16 maxpacket; 414 + u16 type; 415 + u16 bufnum; 416 + u16 buf_bsize; 417 + u16 interval; 418 + u16 dir_in; 416 419 }; 417 420 418 421 struct r8a66597_pipe {
+9 -8
drivers/usb/host/u132-hcd.c
··· 52 52 #include <linux/workqueue.h> 53 53 #include <linux/platform_device.h> 54 54 #include <linux/pci_ids.h> 55 + #include <linux/mutex.h> 55 56 #include <asm/io.h> 56 57 #include <asm/irq.h> 57 58 #include <asm/system.h> ··· 84 83 * u132_module_lock exists to protect access to global variables 85 84 * 86 85 */ 87 - static struct semaphore u132_module_lock; 86 + static struct mutex u132_module_lock; 88 87 static int u132_exiting = 0; 89 88 static int u132_instances = 0; 90 89 static struct list_head u132_static_list; ··· 259 258 struct platform_device *pdev = u132->platform_dev; 260 259 struct usb_hcd *hcd = u132_to_hcd(u132); 261 260 u132->going += 1; 262 - down(&u132_module_lock); 261 + mutex_lock(&u132_module_lock); 263 262 list_del_init(&u132->u132_list); 264 263 u132_instances -= 1; 265 - up(&u132_module_lock); 264 + mutex_unlock(&u132_module_lock); 266 265 dev_warn(&u132->platform_dev->dev, "FREEING the hcd=%p and thus the u13" 267 266 "2=%p going=%d pdev=%p\n", hcd, u132, u132->going, pdev); 268 267 usb_put_hcd(hcd); ··· 3112 3111 int retval = 0; 3113 3112 struct u132 *u132 = hcd_to_u132(hcd); 3114 3113 hcd->rsrc_start = 0; 3115 - down(&u132_module_lock); 3114 + mutex_lock(&u132_module_lock); 3116 3115 list_add_tail(&u132->u132_list, &u132_static_list); 3117 3116 u132->sequence_num = ++u132_instances; 3118 - up(&u132_module_lock); 3117 + mutex_unlock(&u132_module_lock); 3119 3118 u132_u132_init_kref(u132); 3120 3119 u132_initialise(u132, pdev); 3121 3120 hcd->product_desc = "ELAN U132 Host Controller"; ··· 3217 3216 INIT_LIST_HEAD(&u132_static_list); 3218 3217 u132_instances = 0; 3219 3218 u132_exiting = 0; 3220 - init_MUTEX(&u132_module_lock); 3219 + mutex_init(&u132_module_lock); 3221 3220 if (usb_disabled()) 3222 3221 return -ENODEV; 3223 3222 printk(KERN_INFO "driver %s built at %s on %s\n", hcd_name, __TIME__, ··· 3233 3232 { 3234 3233 struct u132 *u132; 3235 3234 struct u132 *temp; 3236 - down(&u132_module_lock); 3235 + 
mutex_lock(&u132_module_lock); 3237 3236 u132_exiting += 1; 3238 - up(&u132_module_lock); 3237 + mutex_unlock(&u132_module_lock); 3239 3238 list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) { 3240 3239 platform_device_unregister(u132->platform_dev); 3241 3240 } platform_driver_unregister(&u132_platform_driver);
+38 -21
drivers/usb/host/uhci-q.c
··· 827 827 * If direction is "send", change the packet ID from SETUP (0x2D) 828 828 * to OUT (0xE1). Else change it from SETUP to IN (0x69) and 829 829 * set Short Packet Detect (SPD) for all data packets. 830 + * 831 + * 0-length transfers always get treated as "send". 830 832 */ 831 - if (usb_pipeout(urb->pipe)) 833 + if (usb_pipeout(urb->pipe) || len == 0) 832 834 destination ^= (USB_PID_SETUP ^ USB_PID_OUT); 833 835 else { 834 836 destination ^= (USB_PID_SETUP ^ USB_PID_IN); ··· 841 839 * Build the DATA TDs 842 840 */ 843 841 while (len > 0) { 844 - int pktsze = min(len, maxsze); 842 + int pktsze = maxsze; 843 + 844 + if (len <= pktsze) { /* The last data packet */ 845 + pktsze = len; 846 + status &= ~TD_CTRL_SPD; 847 + } 845 848 846 849 td = uhci_alloc_td(uhci); 847 850 if (!td) ··· 873 866 goto nomem; 874 867 *plink = LINK_TO_TD(td); 875 868 876 - /* 877 - * It's IN if the pipe is an output pipe or we're not expecting 878 - * data back. 879 - */ 880 - destination &= ~TD_TOKEN_PID_MASK; 881 - if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length) 882 - destination |= USB_PID_IN; 883 - else 884 - destination |= USB_PID_OUT; 885 - 869 + /* Change direction for the status transaction */ 870 + destination ^= (USB_PID_IN ^ USB_PID_OUT); 886 871 destination |= TD_TOKEN_TOGGLE; /* End in Data1 */ 887 - 888 - status &= ~TD_CTRL_SPD; 889 872 890 873 uhci_add_td_to_urbp(td, urbp); 891 874 uhci_fill_td(td, status | TD_CTRL_IOC, ··· 1182 1185 } 1183 1186 } 1184 1187 1188 + /* Did we receive a short packet? 
*/ 1185 1189 } else if (len < uhci_expected_length(td_token(td))) { 1186 1190 1187 - /* We received a short packet */ 1188 - if (urb->transfer_flags & URB_SHORT_NOT_OK) 1191 + /* For control transfers, go to the status TD if 1192 + * this isn't already the last data TD */ 1193 + if (qh->type == USB_ENDPOINT_XFER_CONTROL) { 1194 + if (td->list.next != urbp->td_list.prev) 1195 + ret = 1; 1196 + } 1197 + 1198 + /* For bulk and interrupt, this may be an error */ 1199 + else if (urb->transfer_flags & URB_SHORT_NOT_OK) 1189 1200 ret = -EREMOTEIO; 1190 1201 1191 1202 /* Fixup needed only if this isn't the URB's last TD */ ··· 1213 1208 1214 1209 err: 1215 1210 if (ret < 0) { 1216 - /* In case a control transfer gets an error 1217 - * during the setup stage */ 1218 - urb->actual_length = max(urb->actual_length, 0); 1219 - 1220 1211 /* Note that the queue has stopped and save 1221 1212 * the next toggle value */ 1222 1213 qh->element = UHCI_PTR_TERM; ··· 1490 1489 { 1491 1490 struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; 1492 1491 1492 + if (qh->type == USB_ENDPOINT_XFER_CONTROL) { 1493 + 1494 + /* urb->actual_length < 0 means the setup transaction didn't 1495 + * complete successfully. Either it failed or the URB was 1496 + * unlinked first. Regardless, don't confuse people with a 1497 + * negative length. */ 1498 + urb->actual_length = max(urb->actual_length, 0); 1499 + 1500 + /* Report erroneous short transfers */ 1501 + if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) && 1502 + urb->actual_length < 1503 + urb->transfer_buffer_length && 1504 + urb->status == 0)) 1505 + urb->status = -EREMOTEIO; 1506 + } 1507 + 1493 1508 /* When giving back the first URB in an Isochronous queue, 1494 1509 * reinitialize the QH's iso-related members for the next URB. 
*/ 1495 - if (qh->type == USB_ENDPOINT_XFER_ISOC && 1510 + else if (qh->type == USB_ENDPOINT_XFER_ISOC && 1496 1511 urbp->node.prev == &qh->queue && 1497 1512 urbp->node.next != &qh->queue) { 1498 1513 struct urb *nurb = list_entry(urbp->node.next,
+21 -24
drivers/usb/image/mdc800.c
··· 284 284 int data_received=0, wake_up; 285 285 unsigned char* b=urb->transfer_buffer; 286 286 struct mdc800_data* mdc800=urb->context; 287 + int status = urb->status; 287 288 288 - if (urb->status >= 0) 289 - { 289 + if (status >= 0) { 290 290 291 291 //dbg ("%i %i %i %i %i %i %i %i \n",b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7]); 292 292 ··· 324 324 || 325 325 ((mdc800->camera_request_ready == 3) && (mdc800->camera_busy)) 326 326 || 327 - (urb->status < 0) 327 + (status < 0) 328 328 ); 329 329 330 330 if (wake_up) ··· 376 376 static void mdc800_usb_write_notify (struct urb *urb) 377 377 { 378 378 struct mdc800_data* mdc800=urb->context; 379 + int status = urb->status; 379 380 380 - if (urb->status != 0) 381 - { 382 - err ("writing command fails (status=%i)", urb->status); 383 - } 381 + if (status != 0) 382 + err ("writing command fails (status=%i)", status); 384 383 else 385 - { 386 384 mdc800->state=READY; 387 - } 388 385 mdc800->written = 1; 389 386 wake_up (&mdc800->write_wait); 390 387 } ··· 393 396 static void mdc800_usb_download_notify (struct urb *urb) 394 397 { 395 398 struct mdc800_data* mdc800=urb->context; 399 + int status = urb->status; 396 400 397 - if (urb->status == 0) 398 - { 401 + if (status == 0) { 399 402 /* Fill output buffer with these data */ 400 403 memcpy (mdc800->out, urb->transfer_buffer, 64); 401 404 mdc800->out_count=64; ··· 405 408 { 406 409 mdc800->state=READY; 407 410 } 408 - } 409 - else 410 - { 411 - err ("request bytes fails (status:%i)", urb->status); 411 + } else { 412 + err ("request bytes fails (status:%i)", status); 412 413 } 413 414 mdc800->downloaded = 1; 414 415 wake_up (&mdc800->download_wait); ··· 644 649 645 650 retval=0; 646 651 mdc800->irq_urb->dev = mdc800->dev; 647 - if (usb_submit_urb (mdc800->irq_urb, GFP_KERNEL)) 648 - { 649 - err ("request USB irq fails (submit_retval=%i urb_status=%i).",retval, mdc800->irq_urb->status); 652 + retval = usb_submit_urb (mdc800->irq_urb, GFP_KERNEL); 653 + if (retval) { 654 + err 
("request USB irq fails (submit_retval=%i).", retval); 650 655 errn = -EIO; 651 656 goto error_out; 652 657 } ··· 693 698 { 694 699 size_t left=len, sts=len; /* single transfer size */ 695 700 char __user *ptr = buf; 701 + int retval; 696 702 697 703 mutex_lock(&mdc800->io_lock); 698 704 if (mdc800->state == NOT_CONNECTED) ··· 733 737 734 738 /* Download -> Request new bytes */ 735 739 mdc800->download_urb->dev = mdc800->dev; 736 - if (usb_submit_urb (mdc800->download_urb, GFP_KERNEL)) 737 - { 738 - err ("Can't submit download urb (status=%i)",mdc800->download_urb->status); 740 + retval = usb_submit_urb (mdc800->download_urb, GFP_KERNEL); 741 + if (retval) { 742 + err ("Can't submit download urb (retval=%i)",retval); 739 743 mutex_unlock(&mdc800->io_lock); 740 744 return len-left; 741 745 } ··· 784 788 static ssize_t mdc800_device_write (struct file *file, const char __user *buf, size_t len, loff_t *pos) 785 789 { 786 790 size_t i=0; 791 + int retval; 787 792 788 793 mutex_lock(&mdc800->io_lock); 789 794 if (mdc800->state != READY) ··· 851 854 mdc800->state=WORKING; 852 855 memcpy (mdc800->write_urb->transfer_buffer, mdc800->in,8); 853 856 mdc800->write_urb->dev = mdc800->dev; 854 - if (usb_submit_urb (mdc800->write_urb, GFP_KERNEL)) 855 - { 856 - err ("submitting write urb fails (status=%i)", mdc800->write_urb->status); 857 + retval = usb_submit_urb (mdc800->write_urb, GFP_KERNEL); 858 + if (retval) { 859 + err ("submitting write urb fails (retval=%i)", retval); 857 860 mutex_unlock(&mdc800->io_lock); 858 861 return -EIO; 859 862 }
+10 -9
drivers/usb/image/microtek.c
··· 189 189 #define MTS_DEBUG_INT() \ 190 190 do { MTS_DEBUG_GOT_HERE(); \ 191 191 MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \ 192 - MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",(int)transfer->status,(int)context->data_length, (int)transfer->actual_length ); \ 192 + MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",transfer->status,(int)context->data_length, (int)transfer->actual_length ); \ 193 193 mts_debug_dump(context->instance);\ 194 194 } while(0) 195 195 #else ··· 393 393 context 394 394 ); 395 395 396 - transfer->status = 0; 397 - 398 396 res = usb_submit_urb( transfer, GFP_ATOMIC ); 399 397 if ( unlikely(res) ) { 400 398 MTS_INT_ERROR( "could not submit URB! Error was %d\n",(int)res ); ··· 442 444 static void mts_data_done( struct urb* transfer ) 443 445 /* Interrupt context! */ 444 446 { 447 + int status = transfer->status; 445 448 MTS_INT_INIT(); 446 449 447 450 if ( context->data_length != transfer->actual_length ) { 448 451 context->srb->resid = context->data_length - transfer->actual_length; 449 - } else if ( unlikely(transfer->status) ) { 450 - context->srb->result = (transfer->status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; 452 + } else if ( unlikely(status) ) { 453 + context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; 451 454 } 452 455 453 456 mts_get_status(transfer); ··· 460 461 static void mts_command_done( struct urb *transfer ) 461 462 /* Interrupt context! 
*/ 462 463 { 464 + int status = transfer->status; 463 465 MTS_INT_INIT(); 464 466 465 - if ( unlikely(transfer->status) ) { 466 - if (transfer->status == -ENOENT) { 467 + if ( unlikely(status) ) { 468 + if (status == -ENOENT) { 467 469 /* We are being killed */ 468 470 MTS_DEBUG_GOT_HERE(); 469 471 context->srb->result = DID_ABORT<<16; ··· 502 502 static void mts_do_sg (struct urb* transfer) 503 503 { 504 504 struct scatterlist * sg; 505 + int status = transfer->status; 505 506 MTS_INT_INIT(); 506 507 507 508 MTS_DEBUG("Processing fragment %d of %d\n", context->fragment,context->srb->use_sg); 508 509 509 - if (unlikely(transfer->status)) { 510 - context->srb->result = (transfer->status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; 510 + if (unlikely(status)) { 511 + context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; 511 512 mts_transfer_cleanup(transfer); 512 513 } 513 514
+31 -28
drivers/usb/misc/adutux.c
··· 24 24 #include <linux/slab.h> 25 25 #include <linux/module.h> 26 26 #include <linux/usb.h> 27 + #include <linux/mutex.h> 27 28 #include <asm/uaccess.h> 28 29 29 30 #ifdef CONFIG_USB_DEBUG ··· 81 80 82 81 /* Structure to hold all of our device specific stuff */ 83 82 struct adu_device { 84 - struct semaphore sem; /* locks this structure */ 83 + struct mutex mtx; /* locks this structure */ 85 84 struct usb_device* udev; /* save off the usb device pointer */ 86 85 struct usb_interface* interface; 87 86 unsigned char minor; /* the starting minor number for this device */ ··· 179 178 static void adu_interrupt_in_callback(struct urb *urb) 180 179 { 181 180 struct adu_device *dev = urb->context; 181 + int status = urb->status; 182 182 183 - dbg(4," %s : enter, status %d", __FUNCTION__, urb->status); 183 + dbg(4," %s : enter, status %d", __FUNCTION__, status); 184 184 adu_debug_data(5, __FUNCTION__, urb->actual_length, 185 185 urb->transfer_buffer); 186 186 187 187 spin_lock(&dev->buflock); 188 188 189 - if (urb->status != 0) { 190 - if ((urb->status != -ENOENT) && (urb->status != -ECONNRESET)) { 189 + if (status != 0) { 190 + if ((status != -ENOENT) && (status != -ECONNRESET)) { 191 191 dbg(1," %s : nonzero status received: %d", 192 - __FUNCTION__, urb->status); 192 + __FUNCTION__, status); 193 193 } 194 194 goto exit; 195 195 } ··· 218 216 wake_up_interruptible(&dev->read_wait); 219 217 adu_debug_data(5, __FUNCTION__, urb->actual_length, 220 218 urb->transfer_buffer); 221 - dbg(4," %s : leave, status %d", __FUNCTION__, urb->status); 219 + dbg(4," %s : leave, status %d", __FUNCTION__, status); 222 220 } 223 221 224 222 static void adu_interrupt_out_callback(struct urb *urb) 225 223 { 226 224 struct adu_device *dev = urb->context; 225 + int status = urb->status; 227 226 228 - dbg(4," %s : enter, status %d", __FUNCTION__, urb->status); 227 + dbg(4," %s : enter, status %d", __FUNCTION__, status); 229 228 adu_debug_data(5,__FUNCTION__, urb->actual_length, 
urb->transfer_buffer); 230 229 231 - if (urb->status != 0) { 232 - if ((urb->status != -ENOENT) && 233 - (urb->status != -ECONNRESET)) { 230 + if (status != 0) { 231 + if ((status != -ENOENT) && 232 + (status != -ECONNRESET)) { 234 233 dbg(1, " %s :nonzero status received: %d", 235 - __FUNCTION__, urb->status); 234 + __FUNCTION__, status); 236 235 } 237 236 goto exit; 238 237 } ··· 243 240 244 241 adu_debug_data(5, __FUNCTION__, urb->actual_length, 245 242 urb->transfer_buffer); 246 - dbg(4," %s : leave, status %d", __FUNCTION__, urb->status); 243 + dbg(4," %s : leave, status %d", __FUNCTION__, status); 247 244 } 248 245 249 246 static int adu_open(struct inode *inode, struct file *file) ··· 272 269 } 273 270 274 271 /* lock this device */ 275 - if ((retval = down_interruptible(&dev->sem))) { 276 - dbg(2, "%s : sem down failed", __FUNCTION__); 272 + if ((retval = mutex_lock_interruptible(&dev->mtx))) { 273 + dbg(2, "%s : mutex lock failed", __FUNCTION__); 277 274 goto exit_no_device; 278 275 } 279 276 ··· 302 299 if (retval) 303 300 --dev->open_count; 304 301 } 305 - up(&dev->sem); 302 + mutex_unlock(&dev->mtx); 306 303 307 304 exit_no_device: 308 305 dbg(2,"%s : leave, return value %d ", __FUNCTION__, retval); ··· 350 347 } 351 348 352 349 /* lock our device */ 353 - down(&dev->sem); /* not interruptible */ 350 + mutex_lock(&dev->mtx); /* not interruptible */ 354 351 355 352 if (dev->open_count <= 0) { 356 353 dbg(1," %s : device not opened", __FUNCTION__); ··· 360 357 361 358 if (dev->udev == NULL) { 362 359 /* the device was unplugged before the file was released */ 363 - up(&dev->sem); 360 + mutex_unlock(&dev->mtx); 364 361 adu_delete(dev); 365 362 dev = NULL; 366 363 } else { ··· 370 367 371 368 exit: 372 369 if (dev) 373 - up(&dev->sem); 370 + mutex_unlock(&dev->mtx); 374 371 dbg(2," %s : leave, return value %d", __FUNCTION__, retval); 375 372 return retval; 376 373 } ··· 393 390 dev = file->private_data; 394 391 dbg(2," %s : dev=%p", __FUNCTION__, dev); 395 
392 /* lock this object */ 396 - if (down_interruptible(&dev->sem)) 393 + if (mutex_lock_interruptible(&dev->mtx)) 397 394 return -ERESTARTSYS; 398 395 399 396 /* verify that the device wasn't unplugged */ ··· 525 522 526 523 exit: 527 524 /* unlock the device */ 528 - up(&dev->sem); 525 + mutex_unlock(&dev->mtx); 529 526 530 527 dbg(2," %s : leave, return value %d", __FUNCTION__, retval); 531 528 return retval; ··· 546 543 dev = file->private_data; 547 544 548 545 /* lock this object */ 549 - retval = down_interruptible(&dev->sem); 546 + retval = mutex_lock_interruptible(&dev->mtx); 550 547 if (retval) 551 548 goto exit_nolock; 552 549 ··· 574 571 retval = -EINTR; 575 572 goto exit; 576 573 } 577 - up(&dev->sem); 574 + mutex_unlock(&dev->mtx); 578 575 timeout = interruptible_sleep_on_timeout(&dev->write_wait, timeout); 579 - retval = down_interruptible(&dev->sem); 576 + retval = mutex_lock_interruptible(&dev->mtx); 580 577 if (retval) { 581 578 retval = bytes_written ? bytes_written : retval; 582 579 goto exit_nolock; ··· 641 638 642 639 exit: 643 640 /* unlock the device */ 644 - up(&dev->sem); 641 + mutex_unlock(&dev->mtx); 645 642 exit_nolock: 646 643 647 644 dbg(2," %s : leave, return value %d", __FUNCTION__, retval); ··· 701 698 goto exit; 702 699 } 703 700 704 - init_MUTEX(&dev->sem); 701 + mutex_init(&dev->mtx); 705 702 spin_lock_init(&dev->buflock); 706 703 dev->udev = udev; 707 704 init_waitqueue_head(&dev->read_wait); ··· 838 835 usb_deregister_dev(interface, &adu_class); 839 836 dev->minor = 0; 840 837 841 - down(&dev->sem); /* not interruptible */ 838 + mutex_lock(&dev->mtx); /* not interruptible */ 842 839 843 840 /* if the device is not opened, then we clean up right now */ 844 841 dbg(2," %s : open count %d", __FUNCTION__, dev->open_count); 845 842 if (!dev->open_count) { 846 - up(&dev->sem); 843 + mutex_unlock(&dev->mtx); 847 844 adu_delete(dev); 848 845 } else { 849 846 dev->udev = NULL; 850 - up(&dev->sem); 847 + mutex_unlock(&dev->mtx); 851 848 
} 852 849 853 850 dev_info(&interface->dev, "ADU device adutux%d now disconnected",
+5 -4
drivers/usb/misc/appledisplay.c
··· 88 88 { 89 89 struct appledisplay *pdata = urb->context; 90 90 unsigned long flags; 91 + int status = urb->status; 91 92 int retval; 92 93 93 - switch (urb->status) { 94 + switch (status) { 94 95 case 0: 95 96 /* success */ 96 97 break; ··· 103 102 case -ENOENT: 104 103 case -ESHUTDOWN: 105 104 /* This urb is terminated, clean up */ 106 - dbg("%s - urb shutting down with status: %d", 107 - __FUNCTION__, urb->status); 105 + dbg("%s - urb shuttingdown with status: %d", 106 + __FUNCTION__, status); 108 107 return; 109 108 default: 110 109 dbg("%s - nonzero urb status received: %d", 111 - __FUNCTION__, urb->status); 110 + __FUNCTION__, status); 112 111 goto exit; 113 112 } 114 113
+15 -10
drivers/usb/misc/auerswald.c
··· 862 862 pauerbuf_t bp = (pauerbuf_t) urb->context; 863 863 pauerswald_t cp; 864 864 int ret; 865 + int status = urb->status; 866 + 865 867 dbg ("auerswald_ctrlread_wretcomplete called"); 866 - dbg ("complete with status: %d", urb->status); 868 + dbg ("complete with status: %d", status); 867 869 cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); 868 870 869 871 /* check if it is possible to advance */ 870 - if (!auerswald_status_retry (urb->status) || !cp->usbdev) { 872 + if (!auerswald_status_retry(status) || !cp->usbdev) { 871 873 /* reuse the buffer */ 872 - err ("control dummy: transmission error %d, can not retry", urb->status); 874 + err ("control dummy: transmission error %d, can not retry", status); 873 875 auerbuf_releasebuf (bp); 874 876 /* Wake up all processes waiting for a buffer */ 875 877 wake_up (&cp->bufferwait); ··· 904 902 pauerswald_t cp; 905 903 pauerscon_t scp; 906 904 pauerbuf_t bp = (pauerbuf_t) urb->context; 905 + int status = urb->status; 907 906 int ret; 907 + 908 908 dbg ("auerswald_ctrlread_complete called"); 909 909 910 910 cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl))); 911 911 912 912 /* check if there is valid data in this urb */ 913 - if (urb->status) { 914 - dbg ("complete with non-zero status: %d", urb->status); 913 + if (status) { 914 + dbg ("complete with non-zero status: %d", status); 915 915 /* should we do a retry? 
*/ 916 - if (!auerswald_status_retry (urb->status) 916 + if (!auerswald_status_retry(status) 917 917 || !cp->usbdev 918 918 || (cp->version < AUV_RETRY) 919 919 || (bp->retries >= AU_RETRIES)) { 920 920 /* reuse the buffer */ 921 - err ("control read: transmission error %d, can not retry", urb->status); 921 + err ("control read: transmission error %d, can not retry", status); 922 922 auerbuf_releasebuf (bp); 923 923 /* Wake up all processes waiting for a buffer */ 924 924 wake_up (&cp->bufferwait); ··· 978 974 unsigned int channelid; 979 975 unsigned int bytecount; 980 976 int ret; 977 + int status = urb->status; 981 978 pauerbuf_t bp = NULL; 982 979 pauerswald_t cp = (pauerswald_t) urb->context; 983 980 984 981 dbg ("%s called", __FUNCTION__); 985 982 986 - switch (urb->status) { 983 + switch (status) { 987 984 case 0: 988 985 /* success */ 989 986 break; ··· 992 987 case -ENOENT: 993 988 case -ESHUTDOWN: 994 989 /* this urb is terminated, clean up */ 995 - dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status); 990 + dbg("%s - urb shutting down with status: %d", __FUNCTION__, status); 996 991 return; 997 992 default: 998 - dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status); 993 + dbg("%s - nonzero urb status received: %d", __FUNCTION__, status); 999 994 goto exit; 1000 995 } 1001 996
+12 -9
drivers/usb/misc/ftdi-elan.c
··· 44 44 #include <linux/slab.h> 45 45 #include <linux/module.h> 46 46 #include <linux/kref.h> 47 + #include <linux/mutex.h> 47 48 #include <asm/uaccess.h> 48 49 #include <linux/usb.h> 49 50 #include <linux/workqueue.h> ··· 65 64 * ftdi_module_lock exists to protect access to global variables 66 65 * 67 66 */ 68 - static struct semaphore ftdi_module_lock; 67 + static struct mutex ftdi_module_lock; 69 68 static int ftdi_instances = 0; 70 69 static struct list_head ftdi_static_list; 71 70 /* ··· 200 199 dev_warn(&ftdi->udev->dev, "FREEING ftdi=%p\n", ftdi); 201 200 usb_put_dev(ftdi->udev); 202 201 ftdi->disconnected += 1; 203 - down(&ftdi_module_lock); 202 + mutex_lock(&ftdi_module_lock); 204 203 list_del_init(&ftdi->ftdi_list); 205 204 ftdi_instances -= 1; 206 - up(&ftdi_module_lock); 205 + mutex_unlock(&ftdi_module_lock); 207 206 kfree(ftdi->bulk_in_buffer); 208 207 ftdi->bulk_in_buffer = NULL; 209 208 } ··· 747 746 static void ftdi_elan_write_bulk_callback(struct urb *urb) 748 747 { 749 748 struct usb_ftdi *ftdi = (struct usb_ftdi *)urb->context; 750 - if (urb->status && !(urb->status == -ENOENT || urb->status == 751 - -ECONNRESET || urb->status == -ESHUTDOWN)) { 749 + int status = urb->status; 750 + 751 + if (status && !(status == -ENOENT || status == -ECONNRESET || 752 + status == -ESHUTDOWN)) { 752 753 dev_err(&ftdi->udev->dev, "urb=%p write bulk status received: %" 753 - "d\n", urb, urb->status); 754 + "d\n", urb, status); 754 755 } 755 756 usb_buffer_free(urb->dev, urb->transfer_buffer_length, 756 757 urb->transfer_buffer, urb->transfer_dma); ··· 2783 2780 return -ENOMEM; 2784 2781 } 2785 2782 memset(ftdi, 0x00, sizeof(struct usb_ftdi)); 2786 - down(&ftdi_module_lock); 2783 + mutex_lock(&ftdi_module_lock); 2787 2784 list_add_tail(&ftdi->ftdi_list, &ftdi_static_list); 2788 2785 ftdi->sequence_num = ++ftdi_instances; 2789 - up(&ftdi_module_lock); 2786 + mutex_unlock(&ftdi_module_lock); 2790 2787 ftdi_elan_init_kref(ftdi); 2791 2788 init_MUTEX(&ftdi->sw_lock); 
2792 2789 ftdi->udev = usb_get_dev(interface_to_usbdev(interface)); ··· 2912 2909 int result; 2913 2910 printk(KERN_INFO "driver %s built at %s on %s\n", ftdi_elan_driver.name, 2914 2911 __TIME__, __DATE__); 2915 - init_MUTEX(&ftdi_module_lock); 2912 + mutex_init(&ftdi_module_lock); 2916 2913 INIT_LIST_HEAD(&ftdi_static_list); 2917 2914 status_queue = create_singlethread_workqueue("ftdi-status-control"); 2918 2915 if (!status_queue)
+12 -9
drivers/usb/misc/iowarrior.c
··· 158 158 int read_idx; 159 159 int aux_idx; 160 160 int offset; 161 - int status; 161 + int status = urb->status; 162 + int retval; 162 163 163 - switch (urb->status) { 164 + switch (status) { 164 165 case 0: 165 166 /* success */ 166 167 break; ··· 214 213 wake_up_interruptible(&dev->read_wait); 215 214 216 215 exit: 217 - status = usb_submit_urb(urb, GFP_ATOMIC); 218 - if (status) 216 + retval = usb_submit_urb(urb, GFP_ATOMIC); 217 + if (retval) 219 218 dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result %d", 220 - __FUNCTION__, status); 219 + __FUNCTION__, retval); 221 220 222 221 } 223 222 ··· 227 226 static void iowarrior_write_callback(struct urb *urb) 228 227 { 229 228 struct iowarrior *dev; 229 + int status = urb->status; 230 + 230 231 dev = (struct iowarrior *)urb->context; 231 232 /* sync/async unlink faults aren't errors */ 232 - if (urb->status && 233 - !(urb->status == -ENOENT || 234 - urb->status == -ECONNRESET || urb->status == -ESHUTDOWN)) { 233 + if (status && 234 + !(status == -ENOENT || 235 + status == -ECONNRESET || status == -ESHUTDOWN)) { 235 236 dbg("%s - nonzero write bulk status received: %d", 236 - __func__, urb->status); 237 + __func__, status); 237 238 } 238 239 /* free up our allocated buffer */ 239 240 usb_buffer_free(urb->dev, urb->transfer_buffer_length,
+11 -9
drivers/usb/misc/ldusb.c
··· 219 219 struct ld_usb *dev = urb->context; 220 220 size_t *actual_buffer; 221 221 unsigned int next_ring_head; 222 + int status = urb->status; 222 223 int retval; 223 224 224 - if (urb->status) { 225 - if (urb->status == -ENOENT || 226 - urb->status == -ECONNRESET || 227 - urb->status == -ESHUTDOWN) { 225 + if (status) { 226 + if (status == -ENOENT || 227 + status == -ECONNRESET || 228 + status == -ESHUTDOWN) { 228 229 goto exit; 229 230 } else { 230 231 dbg_info(&dev->intf->dev, "%s: nonzero status received: %d\n", 231 - __FUNCTION__, urb->status); 232 + __FUNCTION__, status); 232 233 spin_lock(&dev->rbsl); 233 234 goto resubmit; /* maybe we can recover */ 234 235 } ··· 276 275 static void ld_usb_interrupt_out_callback(struct urb *urb) 277 276 { 278 277 struct ld_usb *dev = urb->context; 278 + int status = urb->status; 279 279 280 280 /* sync/async unlink faults aren't errors */ 281 - if (urb->status && !(urb->status == -ENOENT || 282 - urb->status == -ECONNRESET || 283 - urb->status == -ESHUTDOWN)) 281 + if (status && !(status == -ENOENT || 282 + status == -ECONNRESET || 283 + status == -ESHUTDOWN)) 284 284 dbg_info(&dev->intf->dev, 285 285 "%s - nonzero write interrupt status received: %d\n", 286 - __FUNCTION__, urb->status); 286 + __FUNCTION__, status); 287 287 288 288 dev->interrupt_out_busy = 0; 289 289 wake_up_interruptible(&dev->write_wait);
+15 -13
drivers/usb/misc/legousbtower.c
··· 742 742 static void tower_interrupt_in_callback (struct urb *urb) 743 743 { 744 744 struct lego_usb_tower *dev = (struct lego_usb_tower *)urb->context; 745 + int status = urb->status; 745 746 int retval; 746 747 747 - dbg(4, "%s: enter, status %d", __FUNCTION__, urb->status); 748 + dbg(4, "%s: enter, status %d", __FUNCTION__, status); 748 749 749 750 lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); 750 751 751 - if (urb->status) { 752 - if (urb->status == -ENOENT || 753 - urb->status == -ECONNRESET || 754 - urb->status == -ESHUTDOWN) { 752 + if (status) { 753 + if (status == -ENOENT || 754 + status == -ECONNRESET || 755 + status == -ESHUTDOWN) { 755 756 goto exit; 756 757 } else { 757 - dbg(1, "%s: nonzero status received: %d", __FUNCTION__, urb->status); 758 + dbg(1, "%s: nonzero status received: %d", __FUNCTION__, status); 758 759 goto resubmit; /* maybe we can recover */ 759 760 } 760 761 } ··· 789 788 wake_up_interruptible (&dev->read_wait); 790 789 791 790 lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); 792 - dbg(4, "%s: leave, status %d", __FUNCTION__, urb->status); 791 + dbg(4, "%s: leave, status %d", __FUNCTION__, status); 793 792 } 794 793 795 794 ··· 799 798 static void tower_interrupt_out_callback (struct urb *urb) 800 799 { 801 800 struct lego_usb_tower *dev = (struct lego_usb_tower *)urb->context; 801 + int status = urb->status; 802 802 803 - dbg(4, "%s: enter, status %d", __FUNCTION__, urb->status); 803 + dbg(4, "%s: enter, status %d", __FUNCTION__, status); 804 804 lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); 805 805 806 806 /* sync/async unlink faults aren't errors */ 807 - if (urb->status && !(urb->status == -ENOENT || 808 - urb->status == -ECONNRESET || 809 - urb->status == -ESHUTDOWN)) { 807 + if (status && !(status == -ENOENT || 808 + status == -ECONNRESET || 809 + status == -ESHUTDOWN)) { 810 810 dbg(1, "%s - nonzero write 
bulk status received: %d", 811 - __FUNCTION__, urb->status); 811 + __FUNCTION__, status); 812 812 } 813 813 814 814 dev->interrupt_out_busy = 0; 815 815 wake_up_interruptible(&dev->write_wait); 816 816 817 817 lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer); 818 - dbg(4, "%s: leave, status %d", __FUNCTION__, urb->status); 818 + dbg(4, "%s: leave, status %d", __FUNCTION__, status); 819 819 } 820 820 821 821
+7 -6
drivers/usb/misc/phidgetkit.c
··· 305 305 struct interfacekit *kit = urb->context; 306 306 unsigned char *buffer = kit->data; 307 307 int i, level, sensor; 308 - int status; 308 + int retval; 309 + int status = urb->status; 309 310 310 - switch (urb->status) { 311 + switch (status) { 311 312 case 0: /* success */ 312 313 break; 313 314 case -ECONNRESET: /* unlink */ ··· 378 377 schedule_delayed_work(&kit->do_notify, 0); 379 378 380 379 resubmit: 381 - status = usb_submit_urb(urb, GFP_ATOMIC); 382 - if (status) 383 - err("can't resubmit intr, %s-%s/interfacekit0, status %d", 380 + retval = usb_submit_urb(urb, GFP_ATOMIC); 381 + if (retval) 382 + err("can't resubmit intr, %s-%s/interfacekit0, retval %d", 384 383 kit->udev->bus->bus_name, 385 - kit->udev->devpath, status); 384 + kit->udev->devpath, retval); 386 385 } 387 386 388 387 static void do_notify(struct work_struct *work)
+7 -6
drivers/usb/misc/phidgetmotorcontrol.c
··· 95 95 struct motorcontrol *mc = urb->context; 96 96 unsigned char *buffer = mc->data; 97 97 int i, level; 98 - int status; 98 + int retval; 99 + int status = urb->status;; 99 100 100 - switch (urb->status) { 101 + switch (status) { 101 102 case 0: /* success */ 102 103 break; 103 104 case -ECONNRESET: /* unlink */ ··· 152 151 schedule_delayed_work(&mc->do_notify, 0); 153 152 154 153 resubmit: 155 - status = usb_submit_urb(urb, GFP_ATOMIC); 156 - if (status) 154 + retval = usb_submit_urb(urb, GFP_ATOMIC); 155 + if (retval) 157 156 dev_err(&mc->intf->dev, 158 - "can't resubmit intr, %s-%s/motorcontrol0, status %d", 157 + "can't resubmit intr, %s-%s/motorcontrol0, retval %d", 159 158 mc->udev->bus->bus_name, 160 - mc->udev->devpath, status); 159 + mc->udev->devpath, retval); 161 160 } 162 161 163 162 static void do_notify(struct work_struct *work)
+6 -5
drivers/usb/misc/usblcd.c
··· 176 176 static void lcd_write_bulk_callback(struct urb *urb) 177 177 { 178 178 struct usb_lcd *dev; 179 + int status = urb->status; 179 180 180 181 dev = (struct usb_lcd *)urb->context; 181 182 182 183 /* sync/async unlink faults aren't errors */ 183 - if (urb->status && 184 - !(urb->status == -ENOENT || 185 - urb->status == -ECONNRESET || 186 - urb->status == -ESHUTDOWN)) { 184 + if (status && 185 + !(status == -ENOENT || 186 + status == -ECONNRESET || 187 + status == -ESHUTDOWN)) { 187 188 dbg("USBLCD: %s - nonzero write bulk status received: %d", 188 - __FUNCTION__, urb->status); 189 + __FUNCTION__, status); 189 190 } 190 191 191 192 /* free up our allocated buffer */
+2 -2
drivers/usb/misc/usbtest.c
··· 768 768 769 769 /* some faults are allowed, not required */ 770 770 if (subcase->expected > 0 && ( 771 - ((urb->status == -subcase->expected /* happened */ 772 - || urb->status == 0)))) /* didn't */ 771 + ((status == -subcase->expected /* happened */ 772 + || status == 0)))) /* didn't */ 773 773 status = 0; 774 774 /* sometimes more than one fault is allowed */ 775 775 else if (subcase->number == 12 && status == -EPIPE)
+3 -2
drivers/usb/misc/uss720.c
··· 111 111 struct uss720_async_request *rq; 112 112 struct parport *pp; 113 113 struct parport_uss720_private *priv; 114 + int status = urb->status; 114 115 115 116 rq = urb->context; 116 117 priv = rq->priv; 117 118 pp = priv->pp; 118 - if (urb->status) { 119 - err("async_complete: urb error %d", urb->status); 119 + if (status) { 120 + err("async_complete: urb error %d", status); 120 121 } else if (rq->dr.bRequest == 3) { 121 122 memcpy(priv->reg, rq->reg, sizeof(priv->reg)); 122 123 #if 0
+4 -6
drivers/usb/serial/io_ti.c
··· 2794 2794 2795 2795 dbg ("%s", __FUNCTION__); 2796 2796 2797 - for (i=0; i < serial->num_ports; ++i) { 2797 + for (i = 0; i < serial->num_ports; ++i) { 2798 2798 edge_port = usb_get_serial_port_data(serial->port[i]); 2799 2799 edge_remove_sysfs_attrs(edge_port->port); 2800 - if (edge_port) { 2801 - edge_buf_free(edge_port->ep_out_buf); 2802 - kfree(edge_port); 2803 - } 2800 + edge_buf_free(edge_port->ep_out_buf); 2801 + kfree(edge_port); 2804 2802 usb_set_serial_port_data(serial->port[i], NULL); 2805 2803 } 2806 - kfree (usb_get_serial_data(serial)); 2804 + kfree(usb_get_serial_data(serial)); 2807 2805 usb_set_serial_data(serial, NULL); 2808 2806 } 2809 2807
-5
drivers/usb/serial/mos7720.c
··· 110 110 111 111 dbg("%s"," : Entering\n"); 112 112 113 - if (!urb) { 114 - dbg("%s","Invalid Pointer !!!!:\n"); 115 - return; 116 - } 117 - 118 113 switch (status) { 119 114 case 0: 120 115 /* success */
-19
drivers/usb/serial/mos7840.c
··· 436 436 int result = 0; 437 437 int status = urb->status; 438 438 439 - if (!urb) { 440 - dbg("%s", "Invalid Pointer !!!!:\n"); 441 - return; 442 - } 443 - 444 439 mos7840_port = (struct moschip_port *)urb->context; 445 440 446 441 switch (status) { ··· 520 525 int status = urb->status; 521 526 522 527 dbg("%s", " : Entering\n"); 523 - if (!urb) { 524 - dbg("%s", "Invalid Pointer !!!!:\n"); 525 - return; 526 - } 527 528 528 529 switch (status) { 529 530 case 0: ··· 667 676 struct tty_struct *tty; 668 677 int status = urb->status; 669 678 670 - if (!urb) { 671 - dbg("%s", "Invalid Pointer !!!!:\n"); 672 - return; 673 - } 674 - 675 679 if (status) { 676 680 dbg("nonzero read bulk status received: %d", status); 677 681 return; ··· 738 752 struct tty_struct *tty; 739 753 int status = urb->status; 740 754 int i; 741 - 742 - if (!urb) { 743 - dbg("%s", "Invalid Pointer !!!!:\n"); 744 - return; 745 - } 746 755 747 756 mos7840_port = (struct moschip_port *)urb->context; 748 757 spin_lock(&mos7840_port->pool_lock);
+95 -24
drivers/usb/serial/sierra.c
··· 1 1 /* 2 2 USB Driver for Sierra Wireless 3 3 4 - Copyright (C) 2006 Kevin Lloyd <linux@sierrawireless.com> 4 + Copyright (C) 2006, 2007 Kevin Lloyd <linux@sierrawireless.com> 5 5 6 6 IMPORTANT DISCLAIMER: This driver is not commercially supported by 7 7 Sierra Wireless. Use at your own risk. ··· 12 12 13 13 Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de> 14 14 Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> 15 - 16 15 */ 17 16 18 - #define DRIVER_VERSION "v.1.0.6" 17 + #define DRIVER_VERSION "v.1.2.5b" 19 18 #define DRIVER_AUTHOR "Kevin Lloyd <linux@sierrawireless.com>" 20 19 #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" 21 20 ··· 27 28 #include <linux/usb.h> 28 29 #include <linux/usb/serial.h> 29 30 31 + #define SWIMS_USB_REQUEST_SetMode 0x0B 32 + #define SWIMS_USB_REQUEST_TYPE_SetMode 0x40 33 + #define SWIMS_USB_INDEX_SetMode 0x0000 34 + #define SWIMS_SET_MODE_Modem 0x0001 35 + 36 + /* per port private data */ 37 + #define N_IN_URB 4 38 + #define N_OUT_URB 4 39 + #define IN_BUFLEN 4096 40 + 41 + static int debug; 42 + 43 + enum devicetype { 44 + DEVICE_3_PORT = 0, 45 + DEVICE_1_PORT = 1, 46 + DEVICE_INSTALLER = 2, 47 + }; 48 + 49 + int sierra_set_power_state(struct usb_device *udev, __u16 swiState) 50 + { 51 + int result; 52 + dev_dbg(&udev->dev, "%s", "SET POWER STATE"); 53 + result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 54 + 0x00, /* __u8 request */ 55 + 0x40, /* __u8 request type */ 56 + swiState, /* __u16 value */ 57 + 0, /* __u16 index */ 58 + NULL, /* void *data */ 59 + 0, /* __u16 size */ 60 + USB_CTRL_SET_TIMEOUT); /* int timeout */ 61 + return result; 62 + } 63 + 64 + int sierra_set_ms_mode(struct usb_device *udev, __u16 eSocMode) 65 + { 66 + int result; 67 + dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH"); 68 + result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 69 + SWIMS_USB_REQUEST_SetMode, /* __u8 request */ 70 + SWIMS_USB_REQUEST_TYPE_SetMode, /* __u8 
request type */ 71 + eSocMode, /* __u16 value */ 72 + SWIMS_USB_INDEX_SetMode, /* __u16 index */ 73 + NULL, /* void *data */ 74 + 0, /* __u16 size */ 75 + USB_CTRL_SET_TIMEOUT); /* int timeout */ 76 + return result; 77 + } 78 + 79 + int sierra_probe(struct usb_interface *iface, const struct usb_device_id *id) 80 + { 81 + int result; 82 + struct usb_device *udev; 83 + 84 + udev = usb_get_dev(interface_to_usbdev(iface)); 85 + 86 + /* Check if in installer mode */ 87 + if (id->driver_info == DEVICE_INSTALLER) { 88 + dev_dbg(&udev->dev, "%s", "FOUND DEVICE(SW)\n"); 89 + result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); 90 + /*We do not want to bind to the device when in installer mode*/ 91 + return -EIO; 92 + } 93 + 94 + return usb_serial_probe(iface, id); 95 + } 30 96 31 97 static struct usb_device_id id_table [] = { 32 98 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ 33 99 { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ 34 100 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ 101 + { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ 35 102 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ 36 103 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ 37 - { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless AirCard 595U */ 38 104 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ 105 + { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ 106 + 39 107 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ 40 108 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ 41 109 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ 42 - { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */ 110 + { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ 43 111 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ 112 + { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/ 113 + { USB_DEVICE(0x1199, 0x6833) }, /* 
Sierra Wireless MC8781*/ 114 + { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ 115 + { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ 116 + { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */ 117 + { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */ 44 118 45 - { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ 46 - { USB_DEVICE(0x0F3D, 0x0112) }, /* AirPrime/Sierra PC 5220 */ 119 + { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */ 120 + { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */ 121 + 122 + { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER}, 47 123 { } 48 124 }; 49 125 MODULE_DEVICE_TABLE(usb, id_table); ··· 132 58 static struct usb_device_id id_table_3port [] = { 133 59 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ 134 60 { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ 61 + { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ 135 62 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ 136 63 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ 137 64 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ 138 - { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless AirCard 595U */ 139 65 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ 66 + { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U*/ 67 + 140 68 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ 141 69 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ 142 70 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ 143 - { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */ 71 + { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ 144 72 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ 73 + { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/ 74 + { 
USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/ 75 + { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ 76 + { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ 77 + { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880E */ 78 + { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881E */ 145 79 { } 146 80 }; 147 81 148 82 static struct usb_driver sierra_driver = { 149 83 .name = "sierra", 150 - .probe = usb_serial_probe, 84 + .probe = sierra_probe, 151 85 .disconnect = usb_serial_disconnect, 152 86 .id_table = id_table, 153 87 .no_dynamic_id = 1, 154 88 }; 155 89 156 - 157 - static int debug; 158 - 159 - /* per port private data */ 160 - #define N_IN_URB 4 161 - #define N_OUT_URB 4 162 - #define IN_BUFLEN 4096 163 90 164 91 struct sierra_port_private { 165 92 spinlock_t lock; /* lock the structure */ ··· 496 421 int i; 497 422 struct urb *urb; 498 423 int result; 499 - __u16 set_mode_dzero = 0x0000; 500 424 501 425 portdata = usb_get_serial_port_data(port); 502 426 ··· 530 456 } 531 457 532 458 port->tty->low_latency = 1; 533 - 534 - /* set mode to D0 */ 535 - result = usb_control_msg(serial->dev, 536 - usb_rcvctrlpipe(serial->dev, 0), 537 - 0x00, 0x40, set_mode_dzero, 0, NULL, 538 - 0, USB_CTRL_SET_TIMEOUT); 539 459 540 460 sierra_send_setup(port); 541 461 ··· 577 509 int j; 578 510 579 511 dbg("%s", __FUNCTION__); 512 + 513 + /*Set Device mode to D0 */ 514 + sierra_set_power_state(serial->dev, 0x0000); 580 515 581 516 /* Now setup per port private data */ 582 517 for (i = 0; i < serial->num_ports; i++) {
+28 -28
drivers/usb/storage/dpcm.c
··· 46 46 */ 47 47 int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us) 48 48 { 49 - int ret; 49 + int ret; 50 50 51 - if(srb == NULL) 52 - return USB_STOR_TRANSPORT_ERROR; 51 + if (srb == NULL) 52 + return USB_STOR_TRANSPORT_ERROR; 53 53 54 - US_DEBUGP("dpcm_transport: LUN=%d\n", srb->device->lun); 54 + US_DEBUGP("dpcm_transport: LUN=%d\n", srb->device->lun); 55 55 56 - switch(srb->device->lun) { 57 - case 0: 56 + switch (srb->device->lun) { 57 + case 0: 58 58 59 - /* 60 - * LUN 0 corresponds to the CompactFlash card reader. 61 - */ 62 - ret = usb_stor_CB_transport(srb, us); 63 - break; 59 + /* 60 + * LUN 0 corresponds to the CompactFlash card reader. 61 + */ 62 + ret = usb_stor_CB_transport(srb, us); 63 + break; 64 64 65 65 #ifdef CONFIG_USB_STORAGE_SDDR09 66 - case 1: 66 + case 1: 67 67 68 - /* 69 - * LUN 1 corresponds to the SmartMedia card reader. 70 - */ 68 + /* 69 + * LUN 1 corresponds to the SmartMedia card reader. 70 + */ 71 71 72 - /* 73 - * Set the LUN to 0 (just in case). 74 - */ 75 - srb->device->lun = 0; us->srb->device->lun = 0; 76 - ret = sddr09_transport(srb, us); 77 - srb->device->lun = 1; us->srb->device->lun = 1; 78 - break; 72 + /* 73 + * Set the LUN to 0 (just in case). 74 + */ 75 + srb->device->lun = 0; us->srb->device->lun = 0; 76 + ret = sddr09_transport(srb, us); 77 + srb->device->lun = 1; us->srb->device->lun = 1; 78 + break; 79 79 80 80 #endif 81 81 82 - default: 83 - US_DEBUGP("dpcm_transport: Invalid LUN %d\n", srb->device->lun); 84 - ret = USB_STOR_TRANSPORT_ERROR; 85 - break; 86 - } 87 - return ret; 82 + default: 83 + US_DEBUGP("dpcm_transport: Invalid LUN %d\n", srb->device->lun); 84 + ret = USB_STOR_TRANSPORT_ERROR; 85 + break; 86 + } 87 + return ret; 88 88 }
+7 -6
drivers/usb/storage/onetouch.c
··· 57 57 struct usb_onetouch *onetouch = urb->context; 58 58 signed char *data = onetouch->data; 59 59 struct input_dev *dev = onetouch->dev; 60 - int status; 60 + int status = urb->status; 61 + int retval; 61 62 62 - switch (urb->status) { 63 + switch (status) { 63 64 case 0: /* success */ 64 65 break; 65 66 case -ECONNRESET: /* unlink */ ··· 76 75 input_sync(dev); 77 76 78 77 resubmit: 79 - status = usb_submit_urb (urb, GFP_ATOMIC); 80 - if (status) 81 - err ("can't resubmit intr, %s-%s/input0, status %d", 78 + retval = usb_submit_urb (urb, GFP_ATOMIC); 79 + if (retval) 80 + err ("can't resubmit intr, %s-%s/input0, retval %d", 82 81 onetouch->udev->bus->bus_name, 83 - onetouch->udev->devpath, status); 82 + onetouch->udev->devpath, retval); 84 83 } 85 84 86 85 static int usb_onetouch_open(struct input_dev *dev)
+18
drivers/usb/storage/unusual_devs.h
··· 313 313 US_SC_DEVICE, US_PR_DEVICE,NULL, 314 314 US_FL_NOT_LOCKABLE ), 315 315 316 + /* Reported by Stefan de Konink <skinkie@xs4all.nl> */ 317 + UNUSUAL_DEV( 0x04b0, 0x0401, 0x0200, 0x0200, 318 + "NIKON", 319 + "NIKON DSC D100", 320 + US_SC_DEVICE, US_PR_DEVICE, NULL, 321 + US_FL_FIX_CAPACITY), 322 + 316 323 /* Reported by Andreas Bockhold <andreas@bockionline.de> */ 317 324 UNUSUAL_DEV( 0x04b0, 0x0405, 0x0100, 0x0100, 318 325 "NIKON", ··· 1390 1383 "UCR-61S2B", 1391 1384 US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init, 1392 1385 0 ), 1386 + 1387 + /* Reported by Kevin Lloyd <linux@sierrawireless.com> 1388 + * Entry is needed for the initializer function override, 1389 + * which instructs the device to load as a modem 1390 + * device. 1391 + */ 1392 + UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, 1393 + "Sierra Wireless", 1394 + "USB MMC Storage", 1395 + US_SC_DEVICE, US_PR_DEVICE, NULL, 1396 + US_FL_IGNORE_DEVICE), 1393 1397 1394 1398 /* Reported by Jaco Kroon <jaco@kroon.co.za> 1395 1399 * The usb-storage module found on the Digitech GNX4 (and supposedly other