Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: maple: Support block reads and writes.

This patch updates the maple bus to support asynchronous block reads
and writes as well as generally improving the quality of the code and
supporting concurrency (all needed to support the Dreamcast visual
memory unit - a driver will also be posted for that).

Changes in the bus driver necessitate some changes in the two maple bus
input drivers that are currently in mainline.

As well as supporting block reads and writes this code clean up removes
some poor handling of locks, uses an atomic status variable to serialise
access to devices and more robustly handles the general performance
problems of the bus.

Signed-off-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>

authored by

Adrian McMenamin and committed by
Paul Mundt
b233b28e 41480ae7

+301 -267
+2 -2
drivers/input/joystick/maplecontrol.c
··· 3 3 * Based on drivers/usb/iforce.c 4 4 * 5 5 * Copyright Yaegashi Takeshi, 2001 6 - * Adrian McMenamin, 2008 6 + * Adrian McMenamin, 2008 - 2009 7 7 */ 8 8 9 9 #include <linux/kernel.h> ··· 29 29 struct maple_device *mapledev = mq->dev; 30 30 struct dc_pad *pad = maple_get_drvdata(mapledev); 31 31 struct input_dev *dev = pad->dev; 32 - unsigned char *res = mq->recvbuf; 32 + unsigned char *res = mq->recvbuf->buf; 33 33 34 34 buttons = ~le16_to_cpup((__le16 *)(res + 8)); 35 35
+23 -16
drivers/input/keyboard/maple_keyb.c
··· 1 1 /* 2 2 * SEGA Dreamcast keyboard driver 3 3 * Based on drivers/usb/usbkbd.c 4 - * Copyright YAEGASHI Takeshi, 2001 5 - * Porting to 2.6 Copyright Adrian McMenamin, 2007, 2008 4 + * Copyright (c) YAEGASHI Takeshi, 2001 5 + * Porting to 2.6 Copyright (c) Adrian McMenamin, 2007 - 2009 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify 8 8 * it under the terms of the GNU General Public License as published by ··· 33 33 34 34 #define NR_SCANCODES 256 35 35 36 - MODULE_AUTHOR("YAEGASHI Takeshi, Adrian McMenamin"); 36 + MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk"); 37 37 MODULE_DESCRIPTION("SEGA Dreamcast keyboard driver"); 38 38 MODULE_LICENSE("GPL"); 39 39 ··· 115 115 input_event(dev, EV_MSC, MSC_SCAN, code); 116 116 input_report_key(dev, keycode, 0); 117 117 } else 118 - printk(KERN_DEBUG "maple_keyb: " 118 + dev_dbg(&dev->dev, 119 119 "Unknown key (scancode %#x) released.", 120 120 code); 121 121 } ··· 127 127 input_event(dev, EV_MSC, MSC_SCAN, code); 128 128 input_report_key(dev, keycode, 1); 129 129 } else 130 - printk(KERN_DEBUG "maple_keyb: " 130 + dev_dbg(&dev->dev, 131 131 "Unknown key (scancode %#x) pressed.", 132 132 code); 133 133 } ··· 140 140 { 141 141 struct maple_device *mapledev = mq->dev; 142 142 struct dc_kbd *kbd = maple_get_drvdata(mapledev); 143 - unsigned long *buf = mq->recvbuf; 143 + unsigned long *buf = (unsigned long *)(mq->recvbuf->buf); 144 144 145 145 /* 146 146 * We should always get the lock because the only ··· 159 159 160 160 static int probe_maple_kbd(struct device *dev) 161 161 { 162 - struct maple_device *mdev = to_maple_dev(dev); 163 - struct maple_driver *mdrv = to_maple_driver(dev->driver); 162 + struct maple_device *mdev; 163 + struct maple_driver *mdrv; 164 164 int i, error; 165 165 struct dc_kbd *kbd; 166 166 struct input_dev *idev; 167 167 168 - if (!(mdev->function & MAPLE_FUNC_KEYBOARD)) 169 - return -EINVAL; 168 + mdev = to_maple_dev(dev); 169 + mdrv = 
to_maple_driver(dev->driver); 170 170 171 171 kbd = kzalloc(sizeof(struct dc_kbd), GFP_KERNEL); 172 - idev = input_allocate_device(); 173 - if (!kbd || !idev) { 172 + if (!kbd) { 174 173 error = -ENOMEM; 175 174 goto fail; 175 + } 176 + 177 + idev = input_allocate_device(); 178 + if (!idev) { 179 + error = -ENOMEM; 180 + goto fail_idev_alloc; 176 181 } 177 182 178 183 kbd->dev = idev; ··· 200 195 201 196 error = input_register_device(idev); 202 197 if (error) 203 - goto fail; 198 + goto fail_register; 204 199 205 200 /* Maple polling is locked to VBLANK - which may be just 50/s */ 206 201 maple_getcond_callback(mdev, dc_kbd_callback, HZ/50, ··· 212 207 213 208 return error; 214 209 215 - fail: 216 - input_free_device(idev); 217 - kfree(kbd); 210 + fail_register: 218 211 maple_set_drvdata(mdev, NULL); 212 + input_free_device(idev); 213 + fail_idev_alloc: 214 + kfree(kbd); 215 + fail: 219 216 return error; 220 217 } 221 218
+235 -228
drivers/sh/maple/maple.c
··· 1 1 /* 2 2 * Core maple bus functionality 3 3 * 4 - * Copyright (C) 2007, 2008 Adrian McMenamin 4 + * Copyright (C) 2007 - 2009 Adrian McMenamin 5 5 * Copyright (C) 2001 - 2008 Paul Mundt 6 - * 7 - * Based on 2.4 code by: 8 - * 9 - * Copyright (C) 2000-2001 YAEGASHI Takeshi 6 + * Copyright (C) 2000 - 2001 YAEGASHI Takeshi 10 7 * Copyright (C) 2001 M. R. Brown 11 - * Copyright (C) 2001 Paul Mundt 12 - * 13 - * and others. 14 8 * 15 9 * This file is subject to the terms and conditions of the GNU General Public 16 10 * License. See the file "COPYING" in the main directory of this archive ··· 26 32 #include <mach/dma.h> 27 33 #include <mach/sysasic.h> 28 34 29 - MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin"); 35 + MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); 30 36 MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); 31 37 MODULE_LICENSE("GPL v2"); 32 38 MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}"); ··· 43 49 /* mutex to protect queue of waiting packets */ 44 50 static DEFINE_MUTEX(maple_wlist_lock); 45 51 46 - static struct maple_driver maple_dummy_driver; 52 + static struct maple_driver maple_unsupported_device; 47 53 static struct device maple_bus; 48 54 static int subdevice_map[MAPLE_PORTS]; 49 55 static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; ··· 56 62 int unit; 57 63 }; 58 64 59 - static bool checked[4]; 60 - static struct maple_device *baseunits[4]; 65 + static bool checked[MAPLE_PORTS]; 66 + static bool empty[MAPLE_PORTS]; 67 + static struct maple_device *baseunits[MAPLE_PORTS]; 61 68 62 69 /** 63 70 * maple_driver_register - register a maple driver ··· 92 97 EXPORT_SYMBOL_GPL(maple_driver_unregister); 93 98 94 99 /* set hardware registers to enable next round of dma */ 95 - static void maplebus_dma_reset(void) 100 + static void maple_dma_reset(void) 96 101 { 97 102 ctrl_outl(MAPLE_MAGIC, MAPLE_RESET); 98 103 /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */ 99 
104 ctrl_outl(1, MAPLE_TRIGTYPE); 100 - ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED); 105 + /* 106 + * Maple system register 107 + * bits 31 - 16 timeout in units of 20nsec 108 + * bit 12 hard trigger - set 0 to keep responding to VBLANK 109 + * bits 9 - 8 set 00 for 2 Mbps, 01 for 1 Mbps 110 + * bits 3 - 0 delay (in 1.3ms) between VBLANK and start of DMA 111 + * max delay is 11 112 + */ 113 + ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED); 101 114 ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR); 102 115 ctrl_outl(1, MAPLE_ENABLE); 103 116 } ··· 137 134 { 138 135 struct maple_device *mdev; 139 136 struct mapleq *mq; 140 - if (!dev) 141 - return; 137 + 142 138 mdev = to_maple_dev(dev); 143 139 mq = mdev->mq; 144 - if (mq) { 145 - if (mq->recvbufdcsp) 146 - kmem_cache_free(maple_queue_cache, mq->recvbufdcsp); 147 - kfree(mq); 148 - mq = NULL; 149 - } 140 + kmem_cache_free(maple_queue_cache, mq->recvbuf); 141 + kfree(mq); 150 142 kfree(mdev); 151 143 } 152 144 153 145 /** 154 - * maple_add_packet - add a single instruction to the queue 146 + * maple_add_packet - add a single instruction to the maple bus queue 155 147 * @mdev: maple device 156 148 * @function: function on device being queried 157 149 * @command: maple command to add ··· 156 158 int maple_add_packet(struct maple_device *mdev, u32 function, u32 command, 157 159 size_t length, void *data) 158 160 { 159 - int locking, ret = 0; 161 + int ret = 0; 160 162 void *sendbuf = NULL; 161 163 162 - mutex_lock(&maple_wlist_lock); 163 - /* bounce if device already locked */ 164 - locking = mutex_is_locked(&mdev->mq->mutex); 165 - if (locking) { 166 - ret = -EBUSY; 167 - goto out; 168 - } 169 - 170 - mutex_lock(&mdev->mq->mutex); 171 - 172 164 if (length) { 173 - sendbuf = kmalloc(length * 4, GFP_KERNEL); 165 + sendbuf = kzalloc(length * 4, GFP_KERNEL); 174 166 if (!sendbuf) { 175 - mutex_unlock(&mdev->mq->mutex); 176 167 ret = -ENOMEM; 177 168 goto out; 178 169 } ··· 174 187 memcpy(sendbuf 
+ 4, data, (length - 1) * 4); 175 188 mdev->mq->sendbuf = sendbuf; 176 189 177 - list_add(&mdev->mq->list, &maple_waitq); 178 - out: 190 + mutex_lock(&maple_wlist_lock); 191 + list_add_tail(&mdev->mq->list, &maple_waitq); 179 192 mutex_unlock(&maple_wlist_lock); 193 + out: 180 194 return ret; 181 195 } 182 196 EXPORT_SYMBOL_GPL(maple_add_packet); 183 - 184 - /** 185 - * maple_add_packet_sleeps - add a single instruction to the queue 186 - * @mdev: maple device 187 - * @function: function on device being queried 188 - * @command: maple command to add 189 - * @length: length of command string (in 32 bit words) 190 - * @data: remainder of command string 191 - * 192 - * Same as maple_add_packet(), but waits for the lock to become free. 193 - */ 194 - int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, 195 - u32 command, size_t length, void *data) 196 - { 197 - int locking, ret = 0; 198 - void *sendbuf = NULL; 199 - 200 - locking = mutex_lock_interruptible(&mdev->mq->mutex); 201 - if (locking) { 202 - ret = -EIO; 203 - goto out; 204 - } 205 - 206 - if (length) { 207 - sendbuf = kmalloc(length * 4, GFP_KERNEL); 208 - if (!sendbuf) { 209 - mutex_unlock(&mdev->mq->mutex); 210 - ret = -ENOMEM; 211 - goto out; 212 - } 213 - ((__be32 *)sendbuf)[0] = cpu_to_be32(function); 214 - } 215 - 216 - mdev->mq->command = command; 217 - mdev->mq->length = length; 218 - if (length > 1) 219 - memcpy(sendbuf + 4, data, (length - 1) * 4); 220 - mdev->mq->sendbuf = sendbuf; 221 - 222 - mutex_lock(&maple_wlist_lock); 223 - list_add(&mdev->mq->list, &maple_waitq); 224 - mutex_unlock(&maple_wlist_lock); 225 - out: 226 - return ret; 227 - } 228 - EXPORT_SYMBOL_GPL(maple_add_packet_sleeps); 229 197 230 198 static struct mapleq *maple_allocq(struct maple_device *mdev) 231 199 { 232 200 struct mapleq *mq; 233 201 234 - mq = kmalloc(sizeof(*mq), GFP_KERNEL); 202 + mq = kzalloc(sizeof(*mq), GFP_KERNEL); 235 203 if (!mq) 236 204 goto failed_nomem; 237 205 206 + 
INIT_LIST_HEAD(&mq->list); 238 207 mq->dev = mdev; 239 - mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); 240 - mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp); 208 + mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); 241 209 if (!mq->recvbuf) 242 210 goto failed_p2; 243 - /* 244 - * most devices do not need the mutex - but 245 - * anything that injects block reads or writes 246 - * will rely on it 247 - */ 248 - mutex_init(&mq->mutex); 211 + mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]); 249 212 250 213 return mq; 251 214 252 215 failed_p2: 253 216 kfree(mq); 254 217 failed_nomem: 218 + dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n", 219 + mdev->port, mdev->unit); 255 220 return NULL; 256 221 } 257 222 ··· 211 272 { 212 273 struct maple_device *mdev; 213 274 275 + /* zero this out to avoid kobj subsystem 276 + * thinking it has already been registered */ 277 + 214 278 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); 215 279 if (!mdev) 216 280 return NULL; 217 281 218 282 mdev->port = port; 219 283 mdev->unit = unit; 284 + 220 285 mdev->mq = maple_allocq(mdev); 221 286 222 287 if (!mdev->mq) { ··· 229 286 } 230 287 mdev->dev.bus = &maple_bus_type; 231 288 mdev->dev.parent = &maple_bus; 289 + init_waitqueue_head(&mdev->maple_wait); 232 290 return mdev; 233 291 } 234 292 235 293 static void maple_free_dev(struct maple_device *mdev) 236 294 { 237 - if (!mdev) 238 - return; 239 - if (mdev->mq) { 240 - if (mdev->mq->recvbufdcsp) 241 - kmem_cache_free(maple_queue_cache, 242 - mdev->mq->recvbufdcsp); 243 - kfree(mdev->mq); 244 - } 295 + kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf); 296 + kfree(mdev->mq); 245 297 kfree(mdev); 246 298 } 247 299 ··· 258 320 maple_lastptr = maple_sendptr; 259 321 260 322 *maple_sendptr++ = (port << 16) | len | 0x80000000; 261 - *maple_sendptr++ = PHYSADDR(mq->recvbuf); 323 + *maple_sendptr++ = PHYSADDR(mq->recvbuf->buf); 262 324 *maple_sendptr++ = 263 325 mq->command | (to << 8) | 
(from << 16) | (len << 24); 264 326 while (len-- > 0) ··· 271 333 int i, maple_packets = 0; 272 334 struct mapleq *mq, *nmq; 273 335 336 + if (!maple_dma_done()) 337 + return; 338 + 339 + /* disable DMA */ 340 + ctrl_outl(0, MAPLE_ENABLE); 341 + 274 342 if (!list_empty(&maple_sentq)) 275 - return; 343 + goto finish; 344 + 276 345 mutex_lock(&maple_wlist_lock); 277 - if (list_empty(&maple_waitq) || !maple_dma_done()) { 346 + if (list_empty(&maple_waitq)) { 278 347 mutex_unlock(&maple_wlist_lock); 279 - return; 348 + goto finish; 280 349 } 281 - mutex_unlock(&maple_wlist_lock); 350 + 282 351 maple_lastptr = maple_sendbuf; 283 352 maple_sendptr = maple_sendbuf; 284 - mutex_lock(&maple_wlist_lock); 353 + 285 354 list_for_each_entry_safe(mq, nmq, &maple_waitq, list) { 286 355 maple_build_block(mq); 287 - list_move(&mq->list, &maple_sentq); 356 + list_del_init(&mq->list); 357 + list_add_tail(&mq->list, &maple_sentq); 288 358 if (maple_packets++ > MAPLE_MAXPACKETS) 289 359 break; 290 360 } ··· 302 356 dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, 303 357 PAGE_SIZE, DMA_BIDIRECTIONAL); 304 358 } 359 + 360 + finish: 361 + maple_dma_reset(); 305 362 } 306 363 307 364 /* check if there is a driver registered likely to match this device */ 308 - static int check_matching_maple_driver(struct device_driver *driver, 365 + static int maple_check_matching_driver(struct device_driver *driver, 309 366 void *devptr) 310 367 { 311 368 struct maple_driver *maple_drv; ··· 323 374 324 375 static void maple_detach_driver(struct maple_device *mdev) 325 376 { 326 - if (!mdev) 327 - return; 328 377 device_unregister(&mdev->dev); 329 - mdev = NULL; 330 378 } 331 379 332 380 /* process initial MAPLE_COMMAND_DEVINFO for each device or port */ ··· 331 385 { 332 386 char *p, *recvbuf; 333 387 unsigned long function; 334 - int matched, retval; 388 + int matched, error; 335 389 336 - recvbuf = mdev->mq->recvbuf; 390 + recvbuf = mdev->mq->recvbuf->buf; 337 391 /* copy the data as individual 
elements in 338 392 * case of memory optimisation */ 339 393 memcpy(&mdev->devinfo.function, recvbuf + 4, 4); ··· 341 395 memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1); 342 396 memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1); 343 397 memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30); 344 - memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60); 345 398 memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2); 346 399 memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2); 347 400 memcpy(mdev->product_name, mdev->devinfo.product_name, 30); ··· 359 414 else 360 415 break; 361 416 362 - printk(KERN_INFO "Maple device detected: %s\n", 363 - mdev->product_name); 364 - printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); 365 - 366 417 function = be32_to_cpu(mdev->devinfo.function); 418 + 419 + dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n", 420 + mdev->product_name, function, mdev->port, mdev->unit); 367 421 368 422 if (function > 0x200) { 369 423 /* Do this silently - as not a real device */ 370 424 function = 0; 371 - mdev->driver = &maple_dummy_driver; 425 + mdev->driver = &maple_unsupported_device; 372 426 sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); 427 + 373 428 } else { 374 - printk(KERN_INFO 375 - "Maple bus at (%d, %d): Function 0x%lX\n", 376 - mdev->port, mdev->unit, function); 377 429 378 430 matched = 379 431 bus_for_each_drv(&maple_bus_type, NULL, mdev, 380 - check_matching_maple_driver); 432 + maple_check_matching_driver); 381 433 382 434 if (matched == 0) { 383 435 /* Driver does not exist yet */ 384 - printk(KERN_INFO 385 - "No maple driver found.\n"); 386 - mdev->driver = &maple_dummy_driver; 436 + dev_info(&mdev->dev, "no driver found\n"); 437 + mdev->driver = &maple_unsupported_device; 387 438 } 388 439 sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, 389 440 mdev->unit, function); 390 441 } 442 + 391 443 mdev->function = function; 392 444 mdev->dev.release = &maple_release_device; 393 - 
retval = device_register(&mdev->dev); 394 - if (retval) { 395 - printk(KERN_INFO 396 - "Maple bus: Attempt to register device" 397 - " (%x, %x) failed.\n", 398 - mdev->port, mdev->unit); 445 + 446 + atomic_set(&mdev->busy, 0); 447 + error = device_register(&mdev->dev); 448 + if (error) { 449 + dev_warn(&mdev->dev, "could not register device at" 450 + " (%d, %d), with error 0x%X\n", mdev->unit, 451 + mdev->port, error); 399 452 maple_free_dev(mdev); 400 453 mdev = NULL; 401 454 return; ··· 405 462 * port and unit then return 1 - allows identification 406 463 * of which devices need to be attached or detached 407 464 */ 408 - static int detach_maple_device(struct device *device, void *portptr) 465 + static int check_maple_device(struct device *device, void *portptr) 409 466 { 410 467 struct maple_device_specify *ds; 411 468 struct maple_device *mdev; ··· 420 477 static int setup_maple_commands(struct device *device, void *ignored) 421 478 { 422 479 int add; 423 - struct maple_device *maple_dev = to_maple_dev(device); 424 - 425 - if ((maple_dev->interval > 0) 426 - && time_after(jiffies, maple_dev->when)) { 427 - /* bounce if we cannot lock */ 428 - add = maple_add_packet(maple_dev, 429 - be32_to_cpu(maple_dev->devinfo.function), 480 + struct maple_device *mdev = to_maple_dev(device); 481 + if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 && 482 + time_after(jiffies, mdev->when)) { 483 + /* bounce if we cannot add */ 484 + add = maple_add_packet(mdev, 485 + be32_to_cpu(mdev->devinfo.function), 430 486 MAPLE_COMMAND_GETCOND, 1, NULL); 431 487 if (!add) 432 - maple_dev->when = jiffies + maple_dev->interval; 488 + mdev->when = jiffies + mdev->interval; 433 489 } else { 434 490 if (time_after(jiffies, maple_pnp_time)) 435 - /* This will also bounce */ 436 - maple_add_packet(maple_dev, 0, 437 - MAPLE_COMMAND_DEVINFO, 0, NULL); 491 + /* Ensure we don't have block reads and devinfo 492 + * calls interfering with one another - so flag the 493 + * device as busy */ 494 
+ if (atomic_read(&mdev->busy) == 0) { 495 + atomic_set(&mdev->busy, 1); 496 + maple_add_packet(mdev, 0, 497 + MAPLE_COMMAND_DEVINFO, 0, NULL); 498 + } 438 499 } 439 500 return 0; 440 501 } ··· 446 499 /* VBLANK bottom half - implemented via workqueue */ 447 500 static void maple_vblank_handler(struct work_struct *work) 448 501 { 449 - if (!list_empty(&maple_sentq) || !maple_dma_done()) 502 + int x, locking; 503 + struct maple_device *mdev; 504 + 505 + if (!maple_dma_done()) 450 506 return; 451 507 452 508 ctrl_outl(0, MAPLE_ENABLE); 453 509 510 + if (!list_empty(&maple_sentq)) 511 + goto finish; 512 + 513 + /* 514 + * Set up essential commands - to fetch data and 515 + * check devices are still present 516 + */ 454 517 bus_for_each_dev(&maple_bus_type, NULL, NULL, 455 - setup_maple_commands); 518 + setup_maple_commands); 456 519 457 - if (time_after(jiffies, maple_pnp_time)) 520 + if (time_after(jiffies, maple_pnp_time)) { 521 + /* 522 + * Scan the empty ports - bus is flakey and may have 523 + * mis-reported emptyness 524 + */ 525 + for (x = 0; x < MAPLE_PORTS; x++) { 526 + if (checked[x] && empty[x]) { 527 + mdev = baseunits[x]; 528 + if (!mdev) 529 + break; 530 + atomic_set(&mdev->busy, 1); 531 + locking = maple_add_packet(mdev, 0, 532 + MAPLE_COMMAND_DEVINFO, 0, NULL); 533 + if (!locking) 534 + break; 535 + } 536 + } 537 + 458 538 maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL; 459 - 460 - mutex_lock(&maple_wlist_lock); 461 - if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) { 462 - mutex_unlock(&maple_wlist_lock); 463 - maple_send(); 464 - } else { 465 - mutex_unlock(&maple_wlist_lock); 466 539 } 467 540 468 - maplebus_dma_reset(); 541 + finish: 542 + maple_send(); 469 543 } 470 544 471 - /* handle devices added via hotplugs - placing them on queue for DEVINFO*/ 545 + /* handle devices added via hotplugs - placing them on queue for DEVINFO */ 472 546 static void maple_map_subunits(struct maple_device *mdev, int submask) 473 547 { 474 548 int retval, 
k, devcheck; ··· 501 533 ds.unit = k + 1; 502 534 retval = 503 535 bus_for_each_dev(&maple_bus_type, NULL, &ds, 504 - detach_maple_device); 536 + check_maple_device); 505 537 if (retval) { 506 538 submask = submask >> 1; 507 539 continue; ··· 511 543 mdev_add = maple_alloc_dev(mdev->port, k + 1); 512 544 if (!mdev_add) 513 545 return; 546 + atomic_set(&mdev_add->busy, 1); 514 547 maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO, 515 548 0, NULL); 516 549 /* mark that we are checking sub devices */ ··· 533 564 } 534 565 535 566 /* handle empty port or hotplug removal */ 536 - static void maple_response_none(struct maple_device *mdev, 537 - struct mapleq *mq) 567 + static void maple_response_none(struct maple_device *mdev) 538 568 { 539 - if (mdev->unit != 0) { 540 - list_del(&mq->list); 541 - maple_clean_submap(mdev); 542 - printk(KERN_INFO 543 - "Maple bus device detaching at (%d, %d)\n", 544 - mdev->port, mdev->unit); 569 + maple_clean_submap(mdev); 570 + 571 + if (likely(mdev->unit != 0)) { 572 + /* 573 + * Block devices play up 574 + * and give the impression they have 575 + * been removed even when still in place or 576 + * trip the mtd layer when they have 577 + * really gone - this code traps that eventuality 578 + * and ensures we aren't overloaded with useless 579 + * error messages 580 + */ 581 + if (mdev->can_unload) { 582 + if (!mdev->can_unload(mdev)) { 583 + atomic_set(&mdev->busy, 2); 584 + wake_up(&mdev->maple_wait); 585 + return; 586 + } 587 + } 588 + 589 + dev_info(&mdev->dev, "detaching device at (%d, %d)\n", 590 + mdev->port, mdev->unit); 545 591 maple_detach_driver(mdev); 546 592 return; 547 - } 548 - if (!started || !fullscan) { 549 - if (checked[mdev->port] == false) { 550 - checked[mdev->port] = true; 551 - printk(KERN_INFO "No maple devices attached" 552 - " to port %d\n", mdev->port); 593 + } else { 594 + if (!started || !fullscan) { 595 + if (checked[mdev->port] == false) { 596 + checked[mdev->port] = true; 597 + empty[mdev->port] = 
true; 598 + dev_info(&mdev->dev, "no devices" 599 + " to port %d\n", mdev->port); 600 + } 601 + return; 553 602 } 554 - return; 555 603 } 556 - maple_clean_submap(mdev); 604 + /* Some hardware devices generate false detach messages on unit 0 */ 605 + atomic_set(&mdev->busy, 0); 557 606 } 558 607 559 608 /* preprocess hotplugs or scans */ ··· 586 599 } else { 587 600 if (mdev->unit != 0) 588 601 maple_attach_driver(mdev); 602 + if (mdev->unit == 0) { 603 + empty[mdev->port] = false; 604 + maple_attach_driver(mdev); 605 + } 589 606 } 590 - return; 591 607 } 592 608 if (mdev->unit == 0) { 593 609 submask = recvbuf[2] & 0x1F; ··· 599 609 subdevice_map[mdev->port] = submask; 600 610 } 601 611 } 612 + } 613 + 614 + static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf) 615 + { 616 + if (mdev->fileerr_handler) { 617 + mdev->fileerr_handler(mdev, recvbuf); 618 + return; 619 + } else 620 + dev_warn(&mdev->dev, "device at (%d, %d) reports" 621 + "file error 0x%X\n", mdev->port, mdev->unit, 622 + ((int *)recvbuf)[1]); 602 623 } 603 624 604 625 static void maple_port_rescan(void) ··· 622 621 if (checked[i] == false) { 623 622 fullscan = 0; 624 623 mdev = baseunits[i]; 625 - /* 626 - * test lock in case scan has failed 627 - * but device is still locked 628 - */ 629 - if (mutex_is_locked(&mdev->mq->mutex)) 630 - mutex_unlock(&mdev->mq->mutex); 631 624 maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 632 625 0, NULL); 633 626 } ··· 632 637 static void maple_dma_handler(struct work_struct *work) 633 638 { 634 639 struct mapleq *mq, *nmq; 635 - struct maple_device *dev; 640 + struct maple_device *mdev; 636 641 char *recvbuf; 637 642 enum maple_code code; 638 643 ··· 641 646 ctrl_outl(0, MAPLE_ENABLE); 642 647 if (!list_empty(&maple_sentq)) { 643 648 list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { 644 - recvbuf = mq->recvbuf; 649 + mdev = mq->dev; 650 + recvbuf = mq->recvbuf->buf; 651 + dma_cache_sync(&mdev->dev, recvbuf, 0x400, 652 + DMA_FROM_DEVICE); 
645 653 code = recvbuf[0]; 646 - dev = mq->dev; 647 654 kfree(mq->sendbuf); 648 - mutex_unlock(&mq->mutex); 649 655 list_del_init(&mq->list); 650 - 651 656 switch (code) { 652 657 case MAPLE_RESPONSE_NONE: 653 - maple_response_none(dev, mq); 658 + maple_response_none(mdev); 654 659 break; 655 660 656 661 case MAPLE_RESPONSE_DEVINFO: 657 - maple_response_devinfo(dev, recvbuf); 662 + maple_response_devinfo(mdev, recvbuf); 663 + atomic_set(&mdev->busy, 0); 658 664 break; 659 665 660 666 case MAPLE_RESPONSE_DATATRF: 661 - if (dev->callback) 662 - dev->callback(mq); 667 + if (mdev->callback) 668 + mdev->callback(mq); 669 + atomic_set(&mdev->busy, 0); 670 + wake_up(&mdev->maple_wait); 663 671 break; 664 672 665 673 case MAPLE_RESPONSE_FILEERR: 674 + maple_response_fileerr(mdev, recvbuf); 675 + atomic_set(&mdev->busy, 0); 676 + wake_up(&mdev->maple_wait); 677 + break; 678 + 666 679 case MAPLE_RESPONSE_AGAIN: 667 680 case MAPLE_RESPONSE_BADCMD: 668 681 case MAPLE_RESPONSE_BADFUNC: 669 - printk(KERN_DEBUG 670 - "Maple non-fatal error 0x%X\n", 671 - code); 682 + dev_warn(&mdev->dev, "non-fatal error" 683 + " 0x%X at (%d, %d)\n", code, 684 + mdev->port, mdev->unit); 685 + atomic_set(&mdev->busy, 0); 672 686 break; 673 687 674 688 case MAPLE_RESPONSE_ALLINFO: 675 - printk(KERN_DEBUG 676 - "Maple - extended device information" 677 - " not supported\n"); 689 + dev_notice(&mdev->dev, "extended" 690 + " device information request for (%d, %d)" 691 + " but call is not supported\n", mdev->port, 692 + mdev->unit); 693 + atomic_set(&mdev->busy, 0); 678 694 break; 679 695 680 696 case MAPLE_RESPONSE_OK: 697 + atomic_set(&mdev->busy, 0); 698 + wake_up(&mdev->maple_wait); 681 699 break; 682 700 683 701 default: ··· 707 699 if (!fullscan) 708 700 maple_port_rescan(); 709 701 /* mark that we have been through the first scan */ 710 - if (started == 0) 711 - started = 1; 702 + started = 1; 712 703 } 713 - maplebus_dma_reset(); 704 + maple_send(); 714 705 } 715 706 716 - static irqreturn_t 
maplebus_dma_interrupt(int irq, void *dev_id) 707 + static irqreturn_t maple_dma_interrupt(int irq, void *dev_id) 717 708 { 718 709 /* Load everything into the bottom half */ 719 710 schedule_work(&maple_dma_process); 720 711 return IRQ_HANDLED; 721 712 } 722 713 723 - static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id) 714 + static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id) 724 715 { 725 716 schedule_work(&maple_vblank_process); 726 717 return IRQ_HANDLED; ··· 727 720 728 721 static int maple_set_dma_interrupt_handler(void) 729 722 { 730 - return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt, 731 - IRQF_SHARED, "maple bus DMA", &maple_dummy_driver); 723 + return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt, 724 + IRQF_SHARED, "maple bus DMA", &maple_unsupported_device); 732 725 } 733 726 734 727 static int maple_set_vblank_interrupt_handler(void) 735 728 { 736 - return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt, 737 - IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver); 729 + return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt, 730 + IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device); 738 731 } 739 732 740 733 static int maple_get_dma_buffer(void) ··· 747 740 return 0; 748 741 } 749 742 750 - static int match_maple_bus_driver(struct device *devptr, 743 + static int maple_match_bus_driver(struct device *devptr, 751 744 struct device_driver *drvptr) 752 745 { 753 746 struct maple_driver *maple_drv = to_maple_driver(drvptr); ··· 772 765 { 773 766 } 774 767 775 - static struct maple_driver maple_dummy_driver = { 768 + static struct maple_driver maple_unsupported_device = { 776 769 .drv = { 777 - .name = "maple_dummy_driver", 770 + .name = "maple_unsupported_device", 778 771 .bus = &maple_bus_type, 779 772 }, 780 773 }; 781 - 774 + /** 775 + * maple_bus_type - core maple bus structure 776 + */ 782 777 struct bus_type maple_bus_type = { 783 778 .name = "maple", 784 - .match = 
match_maple_bus_driver, 779 + .match = maple_match_bus_driver, 785 780 .uevent = maple_bus_uevent, 786 781 }; 787 782 EXPORT_SYMBOL_GPL(maple_bus_type); ··· 797 788 { 798 789 int retval, i; 799 790 struct maple_device *mdev[MAPLE_PORTS]; 800 - ctrl_outl(0, MAPLE_STATE); 791 + 792 + ctrl_outl(0, MAPLE_ENABLE); 801 793 802 794 retval = device_register(&maple_bus); 803 795 if (retval) ··· 808 798 if (retval) 809 799 goto cleanup_device; 810 800 811 - retval = driver_register(&maple_dummy_driver.drv); 801 + retval = driver_register(&maple_unsupported_device.drv); 812 802 if (retval) 813 803 goto cleanup_bus; 814 804 815 805 /* allocate memory for maple bus dma */ 816 806 retval = maple_get_dma_buffer(); 817 807 if (retval) { 818 - printk(KERN_INFO 819 - "Maple bus: Failed to allocate Maple DMA buffers\n"); 808 + dev_err(&maple_bus, "failed to allocate DMA buffers\n"); 820 809 goto cleanup_basic; 821 810 } 822 811 823 812 /* set up DMA interrupt handler */ 824 813 retval = maple_set_dma_interrupt_handler(); 825 814 if (retval) { 826 - printk(KERN_INFO 827 - "Maple bus: Failed to grab maple DMA IRQ\n"); 815 + dev_err(&maple_bus, "bus failed to grab maple " 816 + "DMA IRQ\n"); 828 817 goto cleanup_dma; 829 818 } 830 819 831 820 /* set up VBLANK interrupt handler */ 832 821 retval = maple_set_vblank_interrupt_handler(); 833 822 if (retval) { 834 - printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n"); 823 + dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n"); 835 824 goto cleanup_irq; 836 825 } 837 826 838 - maple_queue_cache = 839 - kmem_cache_create("maple_queue_cache", 0x400, 0, 840 - SLAB_HWCACHE_ALIGN, NULL); 827 + maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN); 841 828 842 829 if (!maple_queue_cache) 843 830 goto cleanup_bothirqs; ··· 845 838 /* setup maple ports */ 846 839 for (i = 0; i < MAPLE_PORTS; i++) { 847 840 checked[i] = false; 841 + empty[i] = false; 848 842 mdev[i] = maple_alloc_dev(i, 0); 849 - baseunits[i] = mdev[i]; 850 843 if 
(!mdev[i]) { 851 844 while (i-- > 0) 852 845 maple_free_dev(mdev[i]); 853 846 goto cleanup_cache; 854 847 } 848 + baseunits[i] = mdev[i]; 849 + atomic_set(&mdev[i]->busy, 1); 855 850 maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL); 856 851 subdevice_map[i] = 0; 857 852 } 858 853 859 - /* setup maplebus hardware */ 860 - maplebus_dma_reset(); 861 - /* initial detection */ 854 + maple_pnp_time = jiffies + HZ; 855 + /* prepare initial queue */ 862 856 maple_send(); 863 - maple_pnp_time = jiffies; 864 - printk(KERN_INFO "Maple bus core now registered.\n"); 857 + dev_info(&maple_bus, "bus core now registered\n"); 865 858 866 859 return 0; 867 860 ··· 878 871 free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES); 879 872 880 873 cleanup_basic: 881 - driver_unregister(&maple_dummy_driver.drv); 874 + driver_unregister(&maple_unsupported_device.drv); 882 875 883 876 cleanup_bus: 884 877 bus_unregister(&maple_bus_type); ··· 887 880 device_unregister(&maple_bus); 888 881 889 882 cleanup: 890 - printk(KERN_INFO "Maple bus registration failed\n"); 883 + printk(KERN_ERR "Maple bus registration failed\n"); 891 884 return retval; 892 885 } 893 886 /* Push init to later to ensure hardware gets detected */
+41 -21
include/linux/maple.h
··· 8 8 9 9 /* Maple Bus command and response codes */ 10 10 enum maple_code { 11 - MAPLE_RESPONSE_FILEERR = -5, 12 - MAPLE_RESPONSE_AGAIN = -4, /* request should be retransmitted */ 13 - MAPLE_RESPONSE_BADCMD = -3, 14 - MAPLE_RESPONSE_BADFUNC = -2, 15 - MAPLE_RESPONSE_NONE = -1, /* unit didn't respond at all */ 16 - MAPLE_COMMAND_DEVINFO = 1, 17 - MAPLE_COMMAND_ALLINFO = 2, 18 - MAPLE_COMMAND_RESET = 3, 19 - MAPLE_COMMAND_KILL = 4, 20 - MAPLE_RESPONSE_DEVINFO = 5, 21 - MAPLE_RESPONSE_ALLINFO = 6, 22 - MAPLE_RESPONSE_OK = 7, 23 - MAPLE_RESPONSE_DATATRF = 8, 24 - MAPLE_COMMAND_GETCOND = 9, 25 - MAPLE_COMMAND_GETMINFO = 10, 26 - MAPLE_COMMAND_BREAD = 11, 27 - MAPLE_COMMAND_BWRITE = 12, 28 - MAPLE_COMMAND_SETCOND = 14 11 + MAPLE_RESPONSE_FILEERR = -5, 12 + MAPLE_RESPONSE_AGAIN, /* retransmit */ 13 + MAPLE_RESPONSE_BADCMD, 14 + MAPLE_RESPONSE_BADFUNC, 15 + MAPLE_RESPONSE_NONE, /* unit didn't respond*/ 16 + MAPLE_COMMAND_DEVINFO = 1, 17 + MAPLE_COMMAND_ALLINFO, 18 + MAPLE_COMMAND_RESET, 19 + MAPLE_COMMAND_KILL, 20 + MAPLE_RESPONSE_DEVINFO, 21 + MAPLE_RESPONSE_ALLINFO, 22 + MAPLE_RESPONSE_OK, 23 + MAPLE_RESPONSE_DATATRF, 24 + MAPLE_COMMAND_GETCOND, 25 + MAPLE_COMMAND_GETMINFO, 26 + MAPLE_COMMAND_BREAD, 27 + MAPLE_COMMAND_BWRITE, 28 + MAPLE_COMMAND_BSYNC, 29 + MAPLE_COMMAND_SETCOND, 30 + MAPLE_COMMAND_MICCONTROL 31 + }; 32 + 33 + enum maple_file_errors { 34 + MAPLE_FILEERR_INVALID_PARTITION = 0x01000000, 35 + MAPLE_FILEERR_PHASE_ERROR = 0x02000000, 36 + MAPLE_FILEERR_INVALID_BLOCK = 0x04000000, 37 + MAPLE_FILEERR_WRITE_ERROR = 0x08000000, 38 + MAPLE_FILEERR_INVALID_WRITE_LENGTH = 0x10000000, 39 + MAPLE_FILEERR_BAD_CRC = 0x20000000 40 + }; 41 + 42 + struct maple_buffer { 43 + char bufx[0x400]; 44 + void *buf; 29 45 }; 30 46 31 47 struct mapleq { 32 48 struct list_head list; 33 49 struct maple_device *dev; 34 - void *sendbuf, *recvbuf, *recvbufdcsp; 50 + struct maple_buffer *recvbuf; 51 + void *sendbuf, *recvbuf_p2; 35 52 unsigned char length; 36 53 enum maple_code command; 
37 - struct mutex mutex; 38 54 }; 39 55 40 56 struct maple_devinfo { ··· 68 52 struct maple_driver *driver; 69 53 struct mapleq *mq; 70 54 void (*callback) (struct mapleq * mq); 55 + void (*fileerr_handler)(struct maple_device *mdev, void *recvbuf); 56 + int (*can_unload)(struct maple_device *mdev); 71 57 unsigned long when, interval, function; 72 58 struct maple_devinfo devinfo; 73 59 unsigned char port, unit; 74 60 char product_name[32]; 75 61 char product_licence[64]; 62 + atomic_t busy; 63 + wait_queue_head_t maple_wait; 76 64 struct device dev; 77 65 }; 78 66 ··· 92 72 int maple_driver_register(struct maple_driver *); 93 73 void maple_driver_unregister(struct maple_driver *); 94 74 95 - int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, 75 + int maple_add_packet(struct maple_device *mdev, u32 function, 96 76 u32 command, u32 length, void *data); 97 77 void maple_clear_dev(struct maple_device *mdev); 98 78